wangchuanjin 3 rokov pred
commit
5d97d47752
100 zmenil súbory, kde vykonal 16323 pridanie a 0 odobranie
  1. 27 0
      github.com/olivere/elastic/CONTRIBUTING.md
  2. 20 0
      github.com/olivere/elastic/LICENSE
  3. 388 0
      github.com/olivere/elastic/README.md
  4. 107 0
      github.com/olivere/elastic/alias.go
  5. 160 0
      github.com/olivere/elastic/aliases.go
  6. 301 0
      github.com/olivere/elastic/bulk.go
  7. 112 0
      github.com/olivere/elastic/bulk_delete_request.go
  8. 173 0
      github.com/olivere/elastic/bulk_index_request.go
  9. 17 0
      github.com/olivere/elastic/bulk_request.go
  10. 244 0
      github.com/olivere/elastic/bulk_update_request.go
  11. 28 0
      github.com/olivere/elastic/canonicalize.go
  12. 96 0
      github.com/olivere/elastic/clear_scroll.go
  13. 1145 0
      github.com/olivere/elastic/client.go
  14. 63 0
      github.com/olivere/elastic/cluster-test/README.md
  15. 185 0
      github.com/olivere/elastic/cluster_health.go
  16. 192 0
      github.com/olivere/elastic/cluster_state.go
  17. 90 0
      github.com/olivere/elastic/connection.go
  18. 152 0
      github.com/olivere/elastic/count.go
  19. 75 0
      github.com/olivere/elastic/create_index.go
  20. 26 0
      github.com/olivere/elastic/decoder.go
  21. 118 0
      github.com/olivere/elastic/delete.go
  22. 292 0
      github.com/olivere/elastic/delete_by_query.go
  23. 57 0
      github.com/olivere/elastic/delete_index.go
  24. 136 0
      github.com/olivere/elastic/delete_mapping.go
  25. 118 0
      github.com/olivere/elastic/delete_template.go
  26. 51 0
      github.com/olivere/elastic/doc.go
  27. 48 0
      github.com/olivere/elastic/errors.go
  28. 71 0
      github.com/olivere/elastic/exists.go
  29. 329 0
      github.com/olivere/elastic/explain.go
  30. 74 0
      github.com/olivere/elastic/fetch_source_context.go
  31. 9 0
      github.com/olivere/elastic/filter.go
  32. 167 0
      github.com/olivere/elastic/flush.go
  33. 47 0
      github.com/olivere/elastic/geo_point.go
  34. 223 0
      github.com/olivere/elastic/get.go
  35. 172 0
      github.com/olivere/elastic/get_mapping.go
  36. 113 0
      github.com/olivere/elastic/get_template.go
  37. 496 0
      github.com/olivere/elastic/highlight.go
  38. 217 0
      github.com/olivere/elastic/index.go
  39. 145 0
      github.com/olivere/elastic/index_close.go
  40. 50 0
      github.com/olivere/elastic/index_exists.go
  41. 186 0
      github.com/olivere/elastic/index_get.go
  42. 189 0
      github.com/olivere/elastic/index_get_settings.go
  43. 146 0
      github.com/olivere/elastic/index_open.go
  44. 122 0
      github.com/olivere/elastic/indices_delete_template.go
  45. 107 0
      github.com/olivere/elastic/indices_exists_template.go
  46. 155 0
      github.com/olivere/elastic/indices_exists_type.go
  47. 128 0
      github.com/olivere/elastic/indices_get_template.go
  48. 179 0
      github.com/olivere/elastic/indices_put_template.go
  49. 385 0
      github.com/olivere/elastic/indices_stats.go
  50. 194 0
      github.com/olivere/elastic/multi_get.go
  51. 101 0
      github.com/olivere/elastic/multi_search.go
  52. 311 0
      github.com/olivere/elastic/nodes_info.go
  53. 135 0
      github.com/olivere/elastic/optimize.go
  54. 301 0
      github.com/olivere/elastic/percolate.go
  55. 117 0
      github.com/olivere/elastic/ping.go
  56. 222 0
      github.com/olivere/elastic/put_mapping.go
  57. 152 0
      github.com/olivere/elastic/put_template.go
  58. 14 0
      github.com/olivere/elastic/query.go
  59. 99 0
      github.com/olivere/elastic/refresh.go
  60. 222 0
      github.com/olivere/elastic/reindexer.go
  61. 59 0
      github.com/olivere/elastic/request.go
  62. 40 0
      github.com/olivere/elastic/rescore.go
  63. 59 0
      github.com/olivere/elastic/rescorer.go
  64. 43 0
      github.com/olivere/elastic/response.go
  65. 273 0
      github.com/olivere/elastic/scan.go
  66. 219 0
      github.com/olivere/elastic/scroll.go
  67. 507 0
      github.com/olivere/elastic/search.go
  68. 916 0
      github.com/olivere/elastic/search_aggs.go
  69. 109 0
      github.com/olivere/elastic/search_aggs_avg.go
  70. 128 0
      github.com/olivere/elastic/search_aggs_cardinality.go
  71. 57 0
      github.com/olivere/elastic/search_aggs_children.go
  72. 303 0
      github.com/olivere/elastic/search_aggs_date_histogram.go
  73. 243 0
      github.com/olivere/elastic/search_aggs_date_range.go
  74. 108 0
      github.com/olivere/elastic/search_aggs_extended_stats.go
  75. 58 0
      github.com/olivere/elastic/search_aggs_filter.go
  76. 76 0
      github.com/olivere/elastic/search_aggs_filters.go
  77. 104 0
      github.com/olivere/elastic/search_aggs_geo_bounds.go
  78. 180 0
      github.com/olivere/elastic/search_aggs_geo_distance.go
  79. 56 0
      github.com/olivere/elastic/search_aggs_global.go
  80. 234 0
      github.com/olivere/elastic/search_aggs_histogram.go
  81. 109 0
      github.com/olivere/elastic/search_aggs_max.go
  82. 109 0
      github.com/olivere/elastic/search_aggs_min.go
  83. 66 0
      github.com/olivere/elastic/search_aggs_missing.go
  84. 67 0
      github.com/olivere/elastic/search_aggs_nested.go
  85. 141 0
      github.com/olivere/elastic/search_aggs_percentile_ranks.go
  86. 140 0
      github.com/olivere/elastic/search_aggs_percentiles.go
  87. 232 0
      github.com/olivere/elastic/search_aggs_range.go
  88. 96 0
      github.com/olivere/elastic/search_aggs_significant_terms.go
  89. 108 0
      github.com/olivere/elastic/search_aggs_stats.go
  90. 108 0
      github.com/olivere/elastic/search_aggs_sum.go
  91. 339 0
      github.com/olivere/elastic/search_aggs_terms.go
  92. 150 0
      github.com/olivere/elastic/search_aggs_tophits.go
  93. 111 0
      github.com/olivere/elastic/search_aggs_value_count.go
  94. 12 0
      github.com/olivere/elastic/search_facets.go
  95. 198 0
      github.com/olivere/elastic/search_facets_date_histogram.go
  96. 68 0
      github.com/olivere/elastic/search_facets_filter.go
  97. 202 0
      github.com/olivere/elastic/search_facets_geo_distance.go
  98. 110 0
      github.com/olivere/elastic/search_facets_histogram.go
  99. 120 0
      github.com/olivere/elastic/search_facets_histogram_script.go
  100. 66 0
      github.com/olivere/elastic/search_facets_query.go

+ 27 - 0
github.com/olivere/elastic/CONTRIBUTING.md

@@ -0,0 +1,27 @@
+# How to contribute
+
+Elastic is an open-source project and we are looking forward to each
+contribution.
+
+## Your Pull Request
+
+To make it easy to review and understand your changes, please keep the
+following things in mind before submitting your pull request:
+
+* Work on the latest possible state of `olivere/elastic`.
+* Create a branch dedicated to your change.
+* If possible, write a test case which confirms your change.
+* Make sure your changes and your tests work with all recent versions of
+  Elasticsearch. At the moment, we're targeting the current and the previous
+  release, e.g. the 1.4 and the 1.3 branch.
+* Test your changes before creating a pull request (`go test ./...`).
+* Don't mix several features or bug fixes in one pull request.
+* Create a meaningful commit message.
+* Explain your change, e.g. provide a link to the issue you are fixing and
+  probably a link to the Elasticsearch documentation and/or source code.
+* Format your source with `go fmt`.
+
+## Additional Resources
+
+* [GitHub documentation](http://help.github.com/)
+* [GitHub pull request documentation](http://help.github.com/send-pull-requests/)

+ 20 - 0
github.com/olivere/elastic/LICENSE

@@ -0,0 +1,20 @@
+The MIT License (MIT)
+Copyright © 2012-2015 Oliver Eilhard
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the “Software”), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included
+in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+IN THE SOFTWARE.

+ 388 - 0
github.com/olivere/elastic/README.md

@@ -0,0 +1,388 @@
+# Elastic
+
+Elastic is an [Elasticsearch](http://www.elasticsearch.org/) client for the
+[Go](http://www.golang.org/) programming language.
+
+[![Build Status](https://travis-ci.org/olivere/elastic.svg?branch=master)](https://travis-ci.org/olivere/elastic)
+[![Godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/olivere/elastic)
+[![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/olivere/elastic/master/LICENSE)
+
+See the [wiki](https://github.com/olivere/elastic/wiki) for additional information about Elastic.
+
+
+## Releases
+
+### Current version
+
+This is the source code of the current version of Elastic (version 2).
+
+### Earlier versions
+
+If you came from an earlier version and found that you cannot update, don't
+worry. Earlier versions are still available. All you need to do is go-get
+them and change your import path. See below for details. Here's what you
+need to do to use Elastic version 1:
+
+```sh
+$ go get gopkg.in/olivere/elastic.v1
+```
+
+Then change your import path:
+
+```go
+import "gopkg.in/olivere/elastic.v1"
+```
+
+
+## Status
+
+We use Elastic in production since 2012. Although Elastic is quite stable
+from our experience, we don't have a stable API yet. The reason for this
+is that Elasticsearch changes quite often and at a fast pace.
+At this moment we focus on features, not on a stable API.
+
+Having said that, there have been no big API changes that required you
+to rewrite your application big time.
+More often than not it's renaming APIs and adding/removing features
+so that we are in sync with the Elasticsearch API.
+
+Elastic supports and has been tested in production with
+the following Elasticsearch versions: 0.90, 1.0, 1.1, 1.2, 1.3, and 1.4.
+
+Elasticsearch has quite a few features. A lot of them are
+not yet implemented in Elastic (see below for details).
+I add features and APIs as required. It's straightforward
+to implement missing pieces. I'm accepting pull requests :-)
+
+Having said that, I hope you find the project useful.
+
+
+## Usage
+
+The first thing you do is to create a Client. The client connects to
+Elasticsearch on http://127.0.0.1:9200 by default.
+
+You typically create one client for your app. Here's a complete example.
+
+```go
+// Create a client
+client, err := elastic.NewClient()
+if err != nil {
+    // Handle error
+}
+
+// Create an index
+_, err = client.CreateIndex("twitter").Do()
+if err != nil {
+    // Handle error
+    panic(err)
+}
+
+// Add a document to the index
+tweet := Tweet{User: "olivere", Message: "Take Five"}
+_, err = client.Index().
+    Index("twitter").
+    Type("tweet").
+    Id("1").
+    BodyJson(tweet).
+    Do()
+if err != nil {
+    // Handle error
+    panic(err)
+}
+
+// Search with a term query
+termQuery := elastic.NewTermQuery("user", "olivere")
+searchResult, err := client.Search().
+    Index("twitter").   // search in index "twitter"
+    Query(&termQuery).  // specify the query
+    Sort("user", true). // sort by "user" field, ascending
+    From(0).Size(10).   // take documents 0-9
+    Debug(true).        // print request and response to stdout
+    Pretty(true).       // pretty print request and response JSON
+    Do()                // execute
+if err != nil {
+    // Handle error
+    panic(err)
+}
+
+// searchResult is of type SearchResult and returns hits, suggestions,
+// and all kinds of other information from Elasticsearch.
+fmt.Printf("Query took %d milliseconds\n", searchResult.TookInMillis)
+
+// Each is a convenience function that iterates over hits in a search result.
+// It makes sure you don't need to check for nil values in the response.
+// However, it ignores errors in serialization. If you want full control
+// over iterating the hits, see below.
+var ttyp Tweet
+for _, item := range searchResult.Each(reflect.TypeOf(ttyp)) {
+    t := item.(Tweet)
+    fmt.Printf("Tweet by %s: %s\n", t.User, t.Message)
+}
+// TotalHits is another convenience function that works even when something goes wrong.
+fmt.Printf("Found a total of %d tweets\n", searchResult.TotalHits())
+
+// Here's how you iterate through results with full control over each step.
+if searchResult.Hits != nil {
+    fmt.Printf("Found a total of %d tweets\n", searchResult.Hits.TotalHits)
+
+    // Iterate through results
+    for _, hit := range searchResult.Hits.Hits {
+        // hit.Index contains the name of the index
+
+        // Deserialize hit.Source into a Tweet (could also be just a map[string]interface{}).
+        var t Tweet
+        err := json.Unmarshal(*hit.Source, &t)
+        if err != nil {
+            // Deserialization failed
+        }
+
+        // Work with tweet
+        fmt.Printf("Tweet by %s: %s\n", t.User, t.Message)
+    }
+} else {
+    // No hits
+    fmt.Print("Found no tweets\n")
+}
+
+// Delete the index again
+_, err = client.DeleteIndex("twitter").Do()
+if err != nil {
+    // Handle error
+    panic(err)
+}
+```
+
+See the [wiki](https://github.com/olivere/elastic/wiki) for more details.
+
+
+## API Status
+
+Here's the current API status.
+
+### APIs
+
+- [x] Search (most queries, filters, facets, aggregations etc. are implemented: see below)
+- [x] Index
+- [x] Get
+- [x] Delete
+- [x] Delete By Query
+- [x] Update
+- [x] Multi Get
+- [x] Bulk
+- [ ] Bulk UDP
+- [ ] Term vectors
+- [ ] Multi term vectors
+- [x] Count
+- [ ] Validate
+- [x] Explain
+- [x] Search
+- [ ] Search shards
+- [x] Search template
+- [x] Facets (most are implemented, see below)
+- [x] Aggregates (most are implemented, see below)
+- [x] Multi Search
+- [x] Percolate
+- [ ] More like this
+- [ ] Benchmark
+
+### Indices
+
+- [x] Create index
+- [x] Delete index
+- [x] Get index
+- [x] Indices exists
+- [x] Open/close index
+- [x] Put mapping
+- [x] Get mapping
+- [ ] Get field mapping
+- [x] Types exist
+- [x] Delete mapping
+- [x] Index aliases
+- [ ] Update indices settings
+- [x] Get settings
+- [ ] Analyze
+- [x] Index templates
+- [ ] Warmers
+- [ ] Status
+- [x] Indices stats
+- [ ] Indices segments
+- [ ] Indices recovery
+- [ ] Clear cache
+- [x] Flush
+- [x] Refresh
+- [x] Optimize
+- [ ] Upgrade
+
+### Snapshot and Restore
+
+- [ ] Snapshot
+- [ ] Restore
+- [ ] Snapshot status
+- [ ] Monitoring snapshot/restore progress
+- [ ] Partial restore
+
+### Cat APIs
+
+Not implemented. Those are better suited for operating with Elasticsearch
+on the command line.
+
+### Cluster
+
+- [x] Health
+- [x] State
+- [ ] Stats
+- [ ] Pending cluster tasks
+- [ ] Cluster reroute
+- [ ] Cluster update settings
+- [ ] Nodes stats
+- [x] Nodes info
+- [ ] Nodes hot_threads
+- [ ] Nodes shutdown
+
+### Query DSL
+
+#### Queries
+
+- [x] `match`
+- [x] `multi_match`
+- [x] `bool`
+- [x] `boosting`
+- [ ] `common_terms`
+- [ ] `constant_score`
+- [x] `dis_max`
+- [x] `filtered`
+- [x] `fuzzy_like_this_query` (`flt`)
+- [x] `fuzzy_like_this_field_query` (`flt_field`)
+- [x] `function_score`
+- [x] `fuzzy`
+- [ ] `geo_shape`
+- [x] `has_child`
+- [x] `has_parent`
+- [x] `ids`
+- [ ] `indices`
+- [x] `match_all`
+- [x] `mlt`
+- [x] `mlt_field`
+- [x] `nested`
+- [x] `prefix`
+- [x] `query_string`
+- [x] `simple_query_string`
+- [x] `range`
+- [x] `regexp`
+- [ ] `span_first`
+- [ ] `span_multi_term`
+- [ ] `span_near`
+- [ ] `span_not`
+- [ ] `span_or`
+- [ ] `span_term`
+- [x] `term`
+- [x] `terms`
+- [ ] `top_children`
+- [x] `wildcard`
+- [ ] `minimum_should_match`
+- [ ] `multi_term_query_rewrite`
+- [x] `template_query`
+
+#### Filters
+
+- [x] `and`
+- [x] `bool`
+- [x] `exists`
+- [ ] `geo_bounding_box`
+- [x] `geo_distance`
+- [ ] `geo_distance_range`
+- [x] `geo_polygon`
+- [ ] `geoshape`
+- [ ] `geohash`
+- [x] `has_child`
+- [x] `has_parent`
+- [x] `ids`
+- [ ] `indices`
+- [x] `limit`
+- [x] `match_all`
+- [x] `missing`
+- [x] `nested`
+- [x] `not`
+- [x] `or`
+- [x] `prefix`
+- [x] `query`
+- [x] `range`
+- [x] `regexp`
+- [ ] `script`
+- [x] `term`
+- [x] `terms`
+- [x] `type`
+
+### Facets
+
+- [x] Terms
+- [x] Range
+- [x] Histogram
+- [x] Date Histogram
+- [x] Filter
+- [x] Query
+- [x] Statistical
+- [x] Terms Stats
+- [x] Geo Distance
+
+### Aggregations
+
+- [x] min
+- [x] max
+- [x] sum
+- [x] avg
+- [x] stats
+- [x] extended stats
+- [x] value count
+- [x] percentiles
+- [x] percentile ranks
+- [x] cardinality
+- [x] geo bounds
+- [x] top hits
+- [ ] scripted metric
+- [x] global
+- [x] filter
+- [x] filters
+- [x] missing
+- [x] nested
+- [x] reverse nested
+- [x] children
+- [x] terms
+- [x] significant terms
+- [x] range
+- [x] date range
+- [x] ipv4 range
+- [x] histogram
+- [x] date histogram
+- [x] geo distance
+- [x] geohash grid
+
+### Sorting
+
+- [x] Sort by score
+- [x] Sort by field
+- [x] Sort by geo distance
+- [x] Sort by script
+
+### Scan
+
+Scrolling through documents (e.g. `search_type=scan`) are implemented via
+the `Scroll` and `Scan` services. The `ClearScroll` API is implemented as well.
+
+## How to contribute
+
+Read [the contribution guidelines](https://github.com/olivere/elastic/blob/master/CONTRIBUTING.md).
+
+## Credits
+
+Thanks a lot for the great folks working hard on
+[Elasticsearch](http://www.elasticsearch.org/)
+and
+[Go](http://www.golang.org/).
+
+## LICENSE
+
+MIT-LICENSE. See [LICENSE](http://olivere.mit-license.org/)
+or the LICENSE file provided in the repository for details.
+

+ 107 - 0
github.com/olivere/elastic/alias.go

@@ -0,0 +1,107 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+)
+
+// AliasService manages index aliases via the Elasticsearch /_aliases
+// endpoint. Add/Remove actions are queued on the builder and submitted
+// together in a single request by Do.
+type AliasService struct {
+	client  *Client
+	actions []aliasAction
+	pretty  bool
+}
+
+// aliasAction is one queued alias operation.
+type aliasAction struct {
+	// "add" or "remove"
+	Type string
+	// Index name
+	Index string
+	// Alias name
+	Alias string
+	// Filter restricts the alias to matching documents (optional).
+	Filter *Filter
+}
+
+// NewAliasService creates a new AliasService bound to the given client.
+func NewAliasService(client *Client) *AliasService {
+	builder := &AliasService{
+		client:  client,
+		actions: make([]aliasAction, 0),
+	}
+	return builder
+}
+
+// Pretty asks Elasticsearch to pretty-print the returned JSON.
+func (s *AliasService) Pretty(pretty bool) *AliasService {
+	s.pretty = pretty
+	return s
+}
+
+// Add queues an action that points aliasName at indexName.
+func (s *AliasService) Add(indexName string, aliasName string) *AliasService {
+	action := aliasAction{Type: "add", Index: indexName, Alias: aliasName}
+	s.actions = append(s.actions, action)
+	return s
+}
+
+// AddWithFilter queues an "add" action whose alias is restricted by filter.
+func (s *AliasService) AddWithFilter(indexName string, aliasName string, filter *Filter) *AliasService {
+	action := aliasAction{Type: "add", Index: indexName, Alias: aliasName, Filter: filter}
+	s.actions = append(s.actions, action)
+	return s
+}
+
+// Remove queues an action that removes aliasName from indexName.
+func (s *AliasService) Remove(indexName string, aliasName string) *AliasService {
+	action := aliasAction{Type: "remove", Index: indexName, Alias: aliasName}
+	s.actions = append(s.actions, action)
+	return s
+}
+
+// Do submits all queued alias actions as one POST to /_aliases and
+// returns the cluster's acknowledgement. The queued actions are not
+// cleared afterwards.
+func (s *AliasService) Do() (*AliasResult, error) {
+	// Build url
+	path := "/_aliases"
+
+	// Parameters
+	params := make(url.Values)
+	if s.pretty {
+		params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+	}
+
+	// Actions: build {"actions": [{"add": {...}}, {"remove": {...}}, ...]}
+	body := make(map[string]interface{})
+	actionsJson := make([]interface{}, 0)
+
+	for _, action := range s.actions {
+		actionJson := make(map[string]interface{})
+		detailsJson := make(map[string]interface{})
+		detailsJson["index"] = action.Index
+		detailsJson["alias"] = action.Alias
+		if action.Filter != nil {
+			// Serialize the optional filter into the action body.
+			detailsJson["filter"] = (*action.Filter).Source()
+		}
+		actionJson[action.Type] = detailsJson
+		actionsJson = append(actionsJson, actionJson)
+	}
+
+	body["actions"] = actionsJson
+
+	// Get response
+	res, err := s.client.PerformRequest("POST", path, params, body)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return results
+	ret := new(AliasResult)
+	if err := json.Unmarshal(res.Body, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// -- Result of an alias request.
+
+// AliasResult is the outcome of an alias request; Acknowledged reports
+// whether the cluster accepted the change.
+type AliasResult struct {
+	Acknowledged bool `json:"acknowledged"`
+}

+ 160 - 0
github.com/olivere/elastic/aliases.go

@@ -0,0 +1,160 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+	"strings"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
+// AliasesService retrieves the aliases defined for one or more indices
+// via GET /{indices}/_aliases.
+type AliasesService struct {
+	client  *Client
+	indices []string
+	pretty  bool
+}
+
+// NewAliasesService creates a new AliasesService bound to the given client.
+func NewAliasesService(client *Client) *AliasesService {
+	builder := &AliasesService{
+		client:  client,
+		indices: make([]string, 0),
+	}
+	return builder
+}
+
+// Pretty asks Elasticsearch to pretty-print the returned JSON.
+func (s *AliasesService) Pretty(pretty bool) *AliasesService {
+	s.pretty = pretty
+	return s
+}
+
+// Index adds a single index to query aliases for.
+func (s *AliasesService) Index(indexName string) *AliasesService {
+	s.indices = append(s.indices, indexName)
+	return s
+}
+
+// Indices adds several indices to query aliases for.
+func (s *AliasesService) Indices(indexNames ...string) *AliasesService {
+	s.indices = append(s.indices, indexNames...)
+	return s
+}
+
+func (s *AliasesService) Do() (*AliasesResult, error) {
+	var err error
+
+	// Build url
+	path := "/"
+
+	// Indices part
+	indexPart := make([]string, 0)
+	for _, index := range s.indices {
+		index, err = uritemplates.Expand("{index}", map[string]string{
+			"index": index,
+		})
+		if err != nil {
+			return nil, err
+		}
+		indexPart = append(indexPart, index)
+	}
+	path += strings.Join(indexPart, ",")
+
+	// TODO Add types here
+
+	// Search
+	path += "/_aliases"
+
+	// Parameters
+	params := make(url.Values)
+	if s.pretty {
+		params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+	}
+
+	// Get response
+	res, err := s.client.PerformRequest("GET", path, params, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// {
+	//   "indexName" : {
+	//     "aliases" : {
+	//       "alias1" : { },
+	//       "alias2" : { }
+	//     }
+	//   },
+	//   "indexName2" : {
+	//     ...
+	//   },
+	// }
+	indexMap := make(map[string]interface{})
+	if err := json.Unmarshal(res.Body, &indexMap); err != nil {
+		return nil, err
+	}
+
+	// Each (indexName, _)
+	ret := &AliasesResult{
+		Indices: make(map[string]indexResult),
+	}
+	for indexName, indexData := range indexMap {
+		indexOut, found := ret.Indices[indexName]
+		if !found {
+			indexOut = indexResult{Aliases: make([]aliasResult, 0)}
+		}
+
+		// { "aliases" : { ... } }
+		indexDataMap, ok := indexData.(map[string]interface{})
+		if ok {
+			aliasesData, ok := indexDataMap["aliases"].(map[string]interface{})
+			if ok {
+				for aliasName, _ := range aliasesData {
+					aliasRes := aliasResult{AliasName: aliasName}
+					indexOut.Aliases = append(indexOut.Aliases, aliasRes)
+				}
+			}
+		}
+
+		ret.Indices[indexName] = indexOut
+	}
+
+	return ret, nil
+}
+
+// -- Result of an alias request.
+
+// AliasesResult maps each index name to the aliases defined on it.
+type AliasesResult struct {
+	Indices map[string]indexResult
+}
+
+// indexResult holds the aliases of a single index.
+type indexResult struct {
+	Aliases []aliasResult
+}
+
+// aliasResult is a single alias name on an index.
+type aliasResult struct {
+	AliasName string
+}
+
+// IndicesByAlias returns the names of all indices that have the given
+// alias defined on them.
+func (ar AliasesResult) IndicesByAlias(aliasName string) []string {
+	indices := make([]string, 0)
+
+	for indexName, indexInfo := range ar.Indices {
+		for _, aliasInfo := range indexInfo.Aliases {
+			if aliasInfo.AliasName == aliasName {
+				indices = append(indices, indexName)
+			}
+		}
+	}
+
+	return indices
+}
+
+// HasAlias reports whether the index carries the given alias.
+func (ir indexResult) HasAlias(aliasName string) bool {
+	for _, alias := range ir.Aliases {
+		if alias.AliasName == aliasName {
+			return true
+		}
+	}
+	return false
+}

+ 301 - 0
github.com/olivere/elastic/bulk.go

@@ -0,0 +1,301 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/url"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
+// BulkService collects index/create/update/delete requests and submits
+// them in one round trip via the Elasticsearch Bulk API (_bulk).
+type BulkService struct {
+	client *Client
+
+	// index and _type are defaults applied in the URL; individual
+	// requests may override them in their own metadata line.
+	index    string
+	_type    string
+	requests []BulkableRequest
+	//replicationType string
+	//consistencyLevel string
+	timeout string
+	refresh *bool
+	pretty  bool
+}
+
+// NewBulkService creates a new BulkService bound to the given client.
+func NewBulkService(client *Client) *BulkService {
+	builder := &BulkService{
+		client:   client,
+		requests: make([]BulkableRequest, 0),
+	}
+	return builder
+}
+
+// reset discards all queued requests so the service can be reused.
+func (s *BulkService) reset() {
+	s.requests = make([]BulkableRequest, 0)
+}
+
+// Index sets the default index for the bulk request URL.
+func (s *BulkService) Index(index string) *BulkService {
+	s.index = index
+	return s
+}
+
+// Type sets the default document type for the bulk request URL.
+func (s *BulkService) Type(_type string) *BulkService {
+	s._type = _type
+	return s
+}
+
+// Timeout sets the per-request timeout value passed to Elasticsearch.
+func (s *BulkService) Timeout(timeout string) *BulkService {
+	s.timeout = timeout
+	return s
+}
+
+// Refresh, if set, asks Elasticsearch to refresh affected shards after
+// the bulk completes.
+func (s *BulkService) Refresh(refresh bool) *BulkService {
+	s.refresh = &refresh
+	return s
+}
+
+// Pretty asks Elasticsearch to pretty-print the returned JSON.
+func (s *BulkService) Pretty(pretty bool) *BulkService {
+	s.pretty = pretty
+	return s
+}
+
+// Add queues one bulkable request (index, delete, update, ...).
+func (s *BulkService) Add(r BulkableRequest) *BulkService {
+	s.requests = append(s.requests, r)
+	return s
+}
+
+// NumberOfActions returns how many requests are currently queued.
+func (s *BulkService) NumberOfActions() int {
+	return len(s.requests)
+}
+
+func (s *BulkService) bodyAsString() (string, error) {
+	buf := bytes.NewBufferString("")
+
+	for _, req := range s.requests {
+		source, err := req.Source()
+		if err != nil {
+			return "", err
+		}
+		for _, line := range source {
+			_, err := buf.WriteString(fmt.Sprintf("%s\n", line))
+			if err != nil {
+				return "", nil
+			}
+		}
+	}
+
+	return buf.String(), nil
+}
+
+// Do sends all queued requests in a single POST to the _bulk endpoint
+// and decodes the per-item results. It fails if no actions are queued.
+// On success the internal queue is reset so the service can be reused;
+// on error the queued requests are kept.
+func (s *BulkService) Do() (*BulkResponse, error) {
+	// No actions?
+	if s.NumberOfActions() == 0 {
+		return nil, errors.New("elastic: No bulk actions to commit")
+	}
+
+	// Get body
+	body, err := s.bodyAsString()
+	if err != nil {
+		return nil, err
+	}
+
+	// Build url: /{index}/{type}/_bulk, with index/type URL-escaped
+	// and omitted when unset.
+	path := "/"
+	if s.index != "" {
+		index, err := uritemplates.Expand("{index}", map[string]string{
+			"index": s.index,
+		})
+		if err != nil {
+			return nil, err
+		}
+		path += index + "/"
+	}
+	if s._type != "" {
+		typ, err := uritemplates.Expand("{type}", map[string]string{
+			"type": s._type,
+		})
+		if err != nil {
+			return nil, err
+		}
+		path += typ + "/"
+	}
+	path += "_bulk"
+
+	// Parameters
+	params := make(url.Values)
+	if s.pretty {
+		params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+	}
+	if s.refresh != nil {
+		params.Set("refresh", fmt.Sprintf("%v", *s.refresh))
+	}
+	if s.timeout != "" {
+		params.Set("timeout", s.timeout)
+	}
+
+	// Get response
+	res, err := s.client.PerformRequest("POST", path, params, body)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return results
+	ret := new(BulkResponse)
+	if err := json.Unmarshal(res.Body, ret); err != nil {
+		return nil, err
+	}
+
+	// Reset so the request can be reused
+	s.reset()
+
+	return ret, nil
+}
+
+// BulkResponse is a response to a bulk execution.
+//
+// Example:
+// {
+//   "took":3,
+//   "errors":false,
+//   "items":[{
+//     "index":{
+//       "_index":"index1",
+//       "_type":"tweet",
+//       "_id":"1",
+//       "_version":3,
+//       "status":201
+//     }
+//   },{
+//     "index":{
+//       "_index":"index2",
+//       "_type":"tweet",
+//       "_id":"2",
+//       "_version":3,
+//       "status":200
+//     }
+//   },{
+//     "delete":{
+//       "_index":"index1",
+//       "_type":"tweet",
+//       "_id":"1",
+//       "_version":4,
+//       "status":200,
+//       "found":true
+//     }
+//   },{
+//     "update":{
+//       "_index":"index2",
+//       "_type":"tweet",
+//       "_id":"2",
+//       "_version":4,
+//       "status":200
+//     }
+//   }]
+// }
+type BulkResponse struct {
+	Took   int                            `json:"took,omitempty"`
+	Errors bool                           `json:"errors,omitempty"`
+	// Items holds one single-entry map per executed action; the map key
+	// is the action name ("index", "create", "update", or "delete").
+	Items  []map[string]*BulkResponseItem `json:"items,omitempty"`
+}
+
+// BulkResponseItem is the result of a single bulk request.
+type BulkResponseItem struct {
+	Index   string `json:"_index,omitempty"`
+	Type    string `json:"_type,omitempty"`
+	Id      string `json:"_id,omitempty"`
+	Version int    `json:"_version,omitempty"`
+	Status  int    `json:"status,omitempty"`
+	Found   bool   `json:"found,omitempty"`
+	Error   string `json:"error,omitempty"`
+}
+
+// Indexed returns all bulk request results of "index" actions.
+func (r *BulkResponse) Indexed() []*BulkResponseItem {
+	return r.ByAction("index")
+}
+
+// Created returns all bulk request results of "create" actions.
+func (r *BulkResponse) Created() []*BulkResponseItem {
+	return r.ByAction("create")
+}
+
+// Updated returns all bulk request results of "update" actions.
+func (r *BulkResponse) Updated() []*BulkResponseItem {
+	return r.ByAction("update")
+}
+
+// Deleted returns all bulk request results of "delete" actions.
+func (r *BulkResponse) Deleted() []*BulkResponseItem {
+	return r.ByAction("delete")
+}
+
+// ByAction returns all bulk request results of a certain action,
+// e.g. "index" or "delete". Returns nil when the response has no items.
+func (r *BulkResponse) ByAction(action string) []*BulkResponseItem {
+	if r.Items == nil {
+		return nil
+	}
+	items := make([]*BulkResponseItem, 0)
+	for _, item := range r.Items {
+		if result, found := item[action]; found {
+			items = append(items, result)
+		}
+	}
+	return items
+}
+
+// ById returns all bulk request results of a given document id,
+// regardless of the action ("index", "delete" etc.).
+func (r *BulkResponse) ById(id string) []*BulkResponseItem {
+	if r.Items == nil {
+		return nil
+	}
+	items := make([]*BulkResponseItem, 0)
+	for _, item := range r.Items {
+		for _, result := range item {
+			if result.Id == id {
+				items = append(items, result)
+			}
+		}
+	}
+	return items
+}
+
+// Failed returns those items of a bulk response that have errors,
+// i.e. those that don't have a status code between 200 and 299.
+func (r *BulkResponse) Failed() []*BulkResponseItem {
+	if r.Items == nil {
+		return nil
+	}
+	errors := make([]*BulkResponseItem, 0)
+	for _, item := range r.Items {
+		for _, result := range item {
+			if !(result.Status >= 200 && result.Status <= 299) {
+				errors = append(errors, result)
+			}
+		}
+	}
+	return errors
+}
+
+// Succeeded returns those items of a bulk response that have no errors,
+// i.e. those have a status code between 200 and 299.
+func (r *BulkResponse) Succeeded() []*BulkResponseItem {
+	if r.Items == nil {
+		return nil
+	}
+	succeeded := make([]*BulkResponseItem, 0)
+	for _, item := range r.Items {
+		for _, result := range item {
+			if result.Status >= 200 && result.Status <= 299 {
+				succeeded = append(succeeded, result)
+			}
+		}
+	}
+	return succeeded
+}

+ 112 - 0
github.com/olivere/elastic/bulk_delete_request.go

@@ -0,0 +1,112 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"strings"
+)
+
+// -- Bulk delete request --
+
// BulkDeleteRequest is a bulk request to remove a document from Elasticsearch.
// Its fields are serialized into the single action line of the Bulk API
// by Source.
type BulkDeleteRequest struct {
	BulkableRequest
	index       string // target index (_index)
	typ         string // document type (_type)
	id          string // document id (_id)
	routing     string // custom routing value (_routing)
	refresh     *bool  // nil means "not set"; serialized only when set
	version     int64  // default is MATCH_ANY
	versionType string // default is "internal"
}
+
// NewBulkDeleteRequest returns a new, empty BulkDeleteRequest.
func NewBulkDeleteRequest() *BulkDeleteRequest {
	return &BulkDeleteRequest{}
}
+
// Index specifies the index in which to delete the document.
func (r *BulkDeleteRequest) Index(index string) *BulkDeleteRequest {
	r.index = index
	return r
}
+
// Type specifies the type of the document to delete.
func (r *BulkDeleteRequest) Type(typ string) *BulkDeleteRequest {
	r.typ = typ
	return r
}
+
// Id specifies the id of the document to delete.
func (r *BulkDeleteRequest) Id(id string) *BulkDeleteRequest {
	r.id = id
	return r
}
+
// Routing specifies a custom routing value for the delete operation.
func (r *BulkDeleteRequest) Routing(routing string) *BulkDeleteRequest {
	r.routing = routing
	return r
}
+
// Refresh indicates whether to refresh the affected shards after the
// delete; it is only serialized into the command when explicitly set.
func (r *BulkDeleteRequest) Refresh(refresh bool) *BulkDeleteRequest {
	r.refresh = &refresh
	return r
}
+
// Version sets the version of the document to delete; it is only
// serialized into the command when greater than zero.
func (r *BulkDeleteRequest) Version(version int64) *BulkDeleteRequest {
	r.version = version
	return r
}
+
// VersionType sets the version type of the delete operation.
// It can be "internal" (default), "external", "external_gte",
// "external_gt", or "force".
func (r *BulkDeleteRequest) VersionType(versionType string) *BulkDeleteRequest {
	r.versionType = versionType
	return r
}
+
+func (r *BulkDeleteRequest) String() string {
+	lines, err := r.Source()
+	if err == nil {
+		return strings.Join(lines, "\n")
+	}
+	return fmt.Sprintf("error: %v", err)
+}
+
+func (r *BulkDeleteRequest) Source() ([]string, error) {
+	lines := make([]string, 1)
+
+	source := make(map[string]interface{})
+	deleteCommand := make(map[string]interface{})
+	if r.index != "" {
+		deleteCommand["_index"] = r.index
+	}
+	if r.typ != "" {
+		deleteCommand["_type"] = r.typ
+	}
+	if r.id != "" {
+		deleteCommand["_id"] = r.id
+	}
+	if r.routing != "" {
+		deleteCommand["_routing"] = r.routing
+	}
+	if r.version > 0 {
+		deleteCommand["_version"] = r.version
+	}
+	if r.versionType != "" {
+		deleteCommand["_version_type"] = r.versionType
+	}
+	if r.refresh != nil {
+		deleteCommand["refresh"] = *r.refresh
+	}
+	source["delete"] = deleteCommand
+
+	body, err := json.Marshal(source)
+	if err != nil {
+		return nil, err
+	}
+
+	lines[0] = string(body)
+
+	return lines, nil
+}

+ 173 - 0
github.com/olivere/elastic/bulk_index_request.go

@@ -0,0 +1,173 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"strings"
+)
+
// BulkIndexRequest is a bulk request to add a document to Elasticsearch.
// Its fields are serialized by Source into the action line plus the
// document line of the Bulk API.
type BulkIndexRequest struct {
	BulkableRequest
	index       string // target index (_index)
	typ         string // document type (_type)
	id          string // document id (_id)
	opType      string // operation type, e.g. "index" or "create"
	routing     string // custom routing value (_routing)
	parent      string // parent document id (_parent)
	timestamp   string // document timestamp (_timestamp)
	ttl         int64  // time-to-live (_ttl); serialized only when > 0
	refresh     *bool  // nil means "not set"
	version     int64  // default is MATCH_ANY
	versionType string // default is "internal"
	doc         interface{} // the document body; "{}" when nil
}
+
// NewBulkIndexRequest returns a new BulkIndexRequest with the
// operation type preset to "index".
func NewBulkIndexRequest() *BulkIndexRequest {
	return &BulkIndexRequest{
		opType: "index",
	}
}
+
// Index specifies the index in which to index the document.
func (r *BulkIndexRequest) Index(index string) *BulkIndexRequest {
	r.index = index
	return r
}
+
// Type specifies the type of the document to index.
func (r *BulkIndexRequest) Type(typ string) *BulkIndexRequest {
	r.typ = typ
	return r
}
+
// Id specifies the id of the document to index.
func (r *BulkIndexRequest) Id(id string) *BulkIndexRequest {
	r.id = id
	return r
}
+
// OpType specifies the operation used as the command key in the
// action line (default is "index", set by NewBulkIndexRequest).
func (r *BulkIndexRequest) OpType(opType string) *BulkIndexRequest {
	r.opType = opType
	return r
}
+
// Routing specifies a custom routing value for the index operation.
func (r *BulkIndexRequest) Routing(routing string) *BulkIndexRequest {
	r.routing = routing
	return r
}
+
// Parent specifies the id of the parent document.
func (r *BulkIndexRequest) Parent(parent string) *BulkIndexRequest {
	r.parent = parent
	return r
}
+
// Timestamp specifies the timestamp stored with the document.
func (r *BulkIndexRequest) Timestamp(timestamp string) *BulkIndexRequest {
	r.timestamp = timestamp
	return r
}
+
// Ttl specifies the time-to-live of the document; it is only
// serialized into the command when greater than zero.
func (r *BulkIndexRequest) Ttl(ttl int64) *BulkIndexRequest {
	r.ttl = ttl
	return r
}
+
// Refresh indicates whether to refresh the affected shards after the
// operation; it is only serialized into the command when explicitly set.
func (r *BulkIndexRequest) Refresh(refresh bool) *BulkIndexRequest {
	r.refresh = &refresh
	return r
}
+
// Version sets the version of the document; it is only serialized
// into the command when greater than zero.
func (r *BulkIndexRequest) Version(version int64) *BulkIndexRequest {
	r.version = version
	return r
}
+
// VersionType sets the version type of the operation
// (default is "internal").
func (r *BulkIndexRequest) VersionType(versionType string) *BulkIndexRequest {
	r.versionType = versionType
	return r
}
+
// Doc specifies the document body to index. It may be a struct or
// map (JSON-marshaled), a string, *string, json.RawMessage, or
// *json.RawMessage (used verbatim); see Source.
func (r *BulkIndexRequest) Doc(doc interface{}) *BulkIndexRequest {
	r.doc = doc
	return r
}
+
+func (r *BulkIndexRequest) String() string {
+	lines, err := r.Source()
+	if err == nil {
+		return strings.Join(lines, "\n")
+	}
+	return fmt.Sprintf("error: %v", err)
+}
+
+func (r *BulkIndexRequest) Source() ([]string, error) {
+	// { "index" : { "_index" : "test", "_type" : "type1", "_id" : "1" } }
+	// { "field1" : "value1" }
+
+	lines := make([]string, 2)
+
+	// "index" ...
+	command := make(map[string]interface{})
+	indexCommand := make(map[string]interface{})
+	if r.index != "" {
+		indexCommand["_index"] = r.index
+	}
+	if r.typ != "" {
+		indexCommand["_type"] = r.typ
+	}
+	if r.id != "" {
+		indexCommand["_id"] = r.id
+	}
+	if r.routing != "" {
+		indexCommand["_routing"] = r.routing
+	}
+	if r.parent != "" {
+		indexCommand["_parent"] = r.parent
+	}
+	if r.timestamp != "" {
+		indexCommand["_timestamp"] = r.timestamp
+	}
+	if r.ttl > 0 {
+		indexCommand["_ttl"] = r.ttl
+	}
+	if r.version > 0 {
+		indexCommand["_version"] = r.version
+	}
+	if r.versionType != "" {
+		indexCommand["_version_type"] = r.versionType
+	}
+	if r.refresh != nil {
+		indexCommand["refresh"] = *r.refresh
+	}
+	command[r.opType] = indexCommand
+	line, err := json.Marshal(command)
+	if err != nil {
+		return nil, err
+	}
+	lines[0] = string(line)
+
+	// "field1" ...
+	if r.doc != nil {
+		switch t := r.doc.(type) {
+		default:
+			body, err := json.Marshal(r.doc)
+			if err != nil {
+				return nil, err
+			}
+			lines[1] = string(body)
+		case json.RawMessage:
+			lines[1] = string(t)
+		case *json.RawMessage:
+			lines[1] = string(*t)
+		case string:
+			lines[1] = t
+		case *string:
+			lines[1] = *t
+		}
+	} else {
+		lines[1] = "{}"
+	}
+
+	return lines, nil
+}

+ 17 - 0
github.com/olivere/elastic/bulk_request.go

@@ -0,0 +1,17 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"fmt"
+)
+
+// -- Bulkable request (index/update/delete) --
+
// BulkableRequest is a generic interface to bulkable requests
// (index, update, delete). Source returns the request serialized
// as the line(s) sent to the Bulk API.
type BulkableRequest interface {
	fmt.Stringer
	Source() ([]string, error)
}

+ 244 - 0
github.com/olivere/elastic/bulk_update_request.go

@@ -0,0 +1,244 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"strings"
+)
+
// BulkUpdateRequest is a bulk request to update a document in
// Elasticsearch. Its fields are serialized by Source into the action
// line plus the update payload line of the Bulk API.
type BulkUpdateRequest struct {
	BulkableRequest
	index string // target index (_index)
	typ   string // document type (_type)
	id    string // document id (_id)

	routing         string                 // custom routing value (_routing)
	parent          string                 // parent document id (_parent)
	script          string                 // update script; used when doc is nil
	scriptType      string                 // NOTE(review): never serialized by Source — confirm intent
	scriptLang      string                 // script language ("lang" in payload)
	scriptParams    map[string]interface{} // script parameters ("params" in payload)
	version         int64  // default is MATCH_ANY
	versionType     string // default is "internal"
	retryOnConflict *int   // _retry_on_conflict; serialized only when set
	refresh         *bool  // nil means "not set"
	upsert          interface{} // document to insert when none exists
	docAsUpsert     *bool       // "doc_as_upsert" flag; serialized only when set
	doc             interface{} // partial document; takes precedence over script
	ttl             int64       // _ttl; serialized only when > 0
	timestamp       string      // _timestamp
}
+
// NewBulkUpdateRequest returns a new, empty BulkUpdateRequest.
func NewBulkUpdateRequest() *BulkUpdateRequest {
	return &BulkUpdateRequest{}
}
+
// Index specifies the index of the document to update.
func (r *BulkUpdateRequest) Index(index string) *BulkUpdateRequest {
	r.index = index
	return r
}
+
// Type specifies the type of the document to update.
func (r *BulkUpdateRequest) Type(typ string) *BulkUpdateRequest {
	r.typ = typ
	return r
}
+
// Id specifies the id of the document to update.
func (r *BulkUpdateRequest) Id(id string) *BulkUpdateRequest {
	r.id = id
	return r
}
+
// Routing specifies a custom routing value for the update operation.
func (r *BulkUpdateRequest) Routing(routing string) *BulkUpdateRequest {
	r.routing = routing
	return r
}
+
// Parent specifies the id of the parent document.
func (r *BulkUpdateRequest) Parent(parent string) *BulkUpdateRequest {
	r.parent = parent
	return r
}
+
// Script specifies an update script; it is only serialized when no
// partial document (Doc) is set.
func (r *BulkUpdateRequest) Script(script string) *BulkUpdateRequest {
	r.script = script
	return r
}
+
// ScriptType sets the script type.
// NOTE(review): this value is currently never serialized by Source —
// confirm whether that is intended.
func (r *BulkUpdateRequest) ScriptType(scriptType string) *BulkUpdateRequest {
	r.scriptType = scriptType
	return r
}
+
// ScriptLang sets the language of the update script
// (serialized as "lang").
func (r *BulkUpdateRequest) ScriptLang(scriptLang string) *BulkUpdateRequest {
	r.scriptLang = scriptLang
	return r
}
+
// ScriptParams sets the parameters passed to the update script
// (serialized as "params").
func (r *BulkUpdateRequest) ScriptParams(params map[string]interface{}) *BulkUpdateRequest {
	r.scriptParams = params
	return r
}
+
// RetryOnConflict sets how often to retry the update in case of a
// version conflict (serialized as "_retry_on_conflict").
func (r *BulkUpdateRequest) RetryOnConflict(retryOnConflict int) *BulkUpdateRequest {
	r.retryOnConflict = &retryOnConflict
	return r
}
+
// Version sets the version of the document to update; it is only
// serialized into the command when greater than zero.
func (r *BulkUpdateRequest) Version(version int64) *BulkUpdateRequest {
	r.version = version
	return r
}
+
// VersionType sets the version type of the update operation.
// It can be "internal" (default), "external", "external_gte",
// "external_gt", or "force".
func (r *BulkUpdateRequest) VersionType(versionType string) *BulkUpdateRequest {
	r.versionType = versionType
	return r
}
+
// Refresh indicates whether to refresh the affected shards after the
// update; it is only serialized into the command when explicitly set.
func (r *BulkUpdateRequest) Refresh(refresh bool) *BulkUpdateRequest {
	r.refresh = &refresh
	return r
}
+
// Doc specifies the partial document to merge into the existing one.
// When set, it takes precedence over Script in Source.
func (r *BulkUpdateRequest) Doc(doc interface{}) *BulkUpdateRequest {
	r.doc = doc
	return r
}
+
// DocAsUpsert indicates whether the partial document should be used
// as the full document if it doesn't exist yet
// (serialized as "doc_as_upsert").
func (r *BulkUpdateRequest) DocAsUpsert(docAsUpsert bool) *BulkUpdateRequest {
	r.docAsUpsert = &docAsUpsert
	return r
}
+
// Upsert specifies the document to index when the document to update
// does not exist yet (serialized as "upsert" in the command line).
func (r *BulkUpdateRequest) Upsert(doc interface{}) *BulkUpdateRequest {
	r.upsert = doc
	return r
}
+
// Ttl specifies the time-to-live of the document; it is only
// serialized into the command when greater than zero.
func (r *BulkUpdateRequest) Ttl(ttl int64) *BulkUpdateRequest {
	r.ttl = ttl
	return r
}
+
// Timestamp specifies the timestamp stored with the document.
func (r *BulkUpdateRequest) Timestamp(timestamp string) *BulkUpdateRequest {
	r.timestamp = timestamp
	return r
}
+
+func (r *BulkUpdateRequest) String() string {
+	lines, err := r.Source()
+	if err == nil {
+		return strings.Join(lines, "\n")
+	}
+	return fmt.Sprintf("error: %v", err)
+}
+
+func (r *BulkUpdateRequest) getSourceAsString(data interface{}) (string, error) {
+	switch t := data.(type) {
+	default:
+		body, err := json.Marshal(data)
+		if err != nil {
+			return "", err
+		}
+		return string(body), nil
+	case json.RawMessage:
+		return string(t), nil
+	case *json.RawMessage:
+		return string(*t), nil
+	case string:
+		return t, nil
+	case *string:
+		return *t, nil
+	}
+}
+
+func (r BulkUpdateRequest) Source() ([]string, error) {
+	// { "update" : { "_index" : "test", "_type" : "type1", "_id" : "1", ... } }
+	// { "doc" : { "field1" : "value1", ... } }
+	// or
+	// { "update" : { "_index" : "test", "_type" : "type1", "_id" : "1", ... } }
+	// { "script" : { ... } }
+
+	lines := make([]string, 2)
+
+	// "update" ...
+	command := make(map[string]interface{})
+	updateCommand := make(map[string]interface{})
+	if r.index != "" {
+		updateCommand["_index"] = r.index
+	}
+	if r.typ != "" {
+		updateCommand["_type"] = r.typ
+	}
+	if r.id != "" {
+		updateCommand["_id"] = r.id
+	}
+	if r.routing != "" {
+		updateCommand["_routing"] = r.routing
+	}
+	if r.parent != "" {
+		updateCommand["_parent"] = r.parent
+	}
+	if r.timestamp != "" {
+		updateCommand["_timestamp"] = r.timestamp
+	}
+	if r.ttl > 0 {
+		updateCommand["_ttl"] = r.ttl
+	}
+	if r.version > 0 {
+		updateCommand["_version"] = r.version
+	}
+	if r.versionType != "" {
+		updateCommand["_version_type"] = r.versionType
+	}
+	if r.refresh != nil {
+		updateCommand["refresh"] = *r.refresh
+	}
+	if r.retryOnConflict != nil {
+		updateCommand["_retry_on_conflict"] = *r.retryOnConflict
+	}
+	if r.upsert != nil {
+		updateCommand["upsert"] = r.upsert
+	}
+	command["update"] = updateCommand
+	line, err := json.Marshal(command)
+	if err != nil {
+		return nil, err
+	}
+	lines[0] = string(line)
+
+	// 2nd line: {"doc" : { ... }} or {"script": {...}}
+	source := make(map[string]interface{})
+	if r.docAsUpsert != nil {
+		source["doc_as_upsert"] = *r.docAsUpsert
+	}
+	if r.doc != nil {
+		// {"doc":{...}}
+		source["doc"] = r.doc
+	} else if r.script != "" {
+		// {"script":...}
+		source["script"] = r.script
+		if r.scriptLang != "" {
+			source["lang"] = r.scriptLang
+		}
+		/*
+			if r.scriptType != "" {
+				source["script_type"] = r.scriptType
+			}
+		*/
+		if r.scriptParams != nil && len(r.scriptParams) > 0 {
+			source["params"] = r.scriptParams
+		}
+	}
+	lines[1], err = r.getSourceAsString(source)
+	if err != nil {
+		return nil, err
+	}
+
+	return lines, nil
+}

+ 28 - 0
github.com/olivere/elastic/canonicalize.go

@@ -0,0 +1,28 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import "net/url"
+
// canonicalize returns the canonicalized form of the given URLs:
// everything but scheme, userinfo, host, and port is stripped, which
// also removes a trailing slash. URLs that fail to parse or whose
// scheme is neither http nor https are silently dropped.
//
// Example:
// http://127.0.0.1:9200/path?query=1 -> http://127.0.0.1:9200
func canonicalize(rawurls ...string) []string {
	results := make([]string, 0)
	for _, rawurl := range rawurls {
		u, err := url.Parse(rawurl)
		if err != nil {
			continue
		}
		if u.Scheme != "http" && u.Scheme != "https" {
			continue
		}
		// Strip everything after the authority part.
		u.Fragment = ""
		u.Path = ""
		u.RawQuery = ""
		results = append(results, u.String())
	}
	return results
}

+ 96 - 0
github.com/olivere/elastic/clear_scroll.go

@@ -0,0 +1,96 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"log"
+	"net/url"
+	"strings"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
// Blank assignments keep the imported packages referenced even when
// only a subset of them is used by the code in this file.
var (
	_ = fmt.Print
	_ = log.Print
	_ = strings.Index
	_ = uritemplates.Expand
	_ = url.Parse
)
+
// ClearScrollService clears search contexts for scrolling searches.
// It is documented at http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/search-request-scroll.html.
type ClearScrollService struct {
	client     *Client  // client used to perform the HTTP request
	pretty     bool     // pretty-print the response (currently unused in Do)
	scrollId   []string // scroll IDs to clear; sent comma-separated in the body
	bodyJson   interface{} // currently unused by Do
	bodyString string      // currently unused by Do
}
+
+// NewClearScrollService creates a new ClearScrollService.
+func NewClearScrollService(client *Client) *ClearScrollService {
+	return &ClearScrollService{
+		client:   client,
+		scrollId: make([]string, 0),
+	}
+}
+
+// ScrollId is a list of scroll IDs to clear.
+// Use _all to clear all search contexts.
+func (s *ClearScrollService) ScrollId(scrollId ...string) *ClearScrollService {
+	s.scrollId = make([]string, 0)
+	s.scrollId = append(s.scrollId, scrollId...)
+	return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *ClearScrollService) buildURL() (string, url.Values, error) {
+	path, err := uritemplates.Expand("/_search/scroll", map[string]string{})
+	if err != nil {
+		return "", url.Values{}, err
+	}
+	return path, url.Values{}, nil
+}
+
// Validate checks if the operation is valid. Clearing scroll contexts
// has no required parameters, so it always succeeds.
func (s *ClearScrollService) Validate() error {
	return nil
}
+
+// Do executes the operation.
+func (s *ClearScrollService) Do() (*ClearScrollResponse, error) {
+	// Check pre-conditions
+	if err := s.Validate(); err != nil {
+		return nil, err
+	}
+
+	// Get URL for request
+	path, params, err := s.buildURL()
+	if err != nil {
+		return nil, err
+	}
+
+	// Setup HTTP request body
+	body := strings.Join(s.scrollId, ",")
+
+	// Get HTTP response
+	res, err := s.client.PerformRequest("DELETE", path, params, body)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return operation response
+	ret := new(ClearScrollResponse)
+	if err := json.Unmarshal(res.Body, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
// ClearScrollResponse is the response of ClearScrollService.Do.
// It currently carries no fields; the body is decoded only to surface
// JSON errors.
type ClearScrollResponse struct {
}

+ 1145 - 0
github.com/olivere/elastic/client.go

@@ -0,0 +1,1145 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"log"
+	"math/rand"
+	"net/http"
+	"net/http/httputil"
+	"net/url"
+	"regexp"
+	"strings"
+	"sync"
+	"time"
+)
+
const (
	// Version is the current version of Elastic.
	Version = "2.0.0"

	// DefaultURL is the default endpoint of Elasticsearch on the local machine.
	// It is used e.g. when initializing a new Client without a specific URL.
	DefaultURL = "http://127.0.0.1:9200"

	// DefaultScheme is the default protocol scheme to use when sniffing
	// the Elasticsearch cluster.
	DefaultScheme = "http"

	// DefaultHealthcheckEnabled specifies if healthchecks are enabled by default.
	DefaultHealthcheckEnabled = true

	// DefaultHealthcheckInterval is the default interval between
	// two health checks of the nodes in the cluster.
	DefaultHealthcheckInterval = 60 * time.Second

	// DefaultSnifferEnabled specifies if the sniffer is enabled by default.
	DefaultSnifferEnabled = true

	// DefaultSnifferInterval is the interval between two sniffing procedures,
	// i.e. the lookup of all nodes in the cluster and their addition/removal
	// from the list of actual connections.
	DefaultSnifferInterval = 15 * time.Minute

	// DefaultSnifferTimeout is the default timeout after which the
	// sniffing process times out.
	DefaultSnifferTimeout = 1 * time.Second

	// DefaultMaxRetries is the number of retries for a single request after
	// Elastic will give up and return an error. It is zero by default, so
	// retry is disabled by default.
	DefaultMaxRetries = 0
)
+
var (
	// ErrNoClient is raised when no Elasticsearch node is available.
	ErrNoClient = errors.New("no Elasticsearch node available")

	// ErrRetry is raised when a request cannot be executed after the
	// configured number of retries (see SetMaxRetries).
	ErrRetry = errors.New("cannot connect after several retries")
)
+
// ClientOptionFunc is a function that configures a Client.
// It is used in NewClient; see the Set* functions in this file for
// the available options.
type ClientOptionFunc func(*Client) error
+
// Client is an Elasticsearch client. Create one by calling NewClient.
// All fields below mu are guarded by mu; conns and cindex are guarded
// by connsMu.
type Client struct {
	c *http.Client // net/http Client to use for requests

	connsMu sync.RWMutex // connsMu guards the next block
	conns   []*conn      // all connections
	cindex  int          // index into conns

	mu                  sync.RWMutex  // guards the next block
	urls                []string      // set of URLs passed initially to the client
	running             bool          // true if the client's background processes are running
	errorlog            *log.Logger   // error log for critical messages
	infolog             *log.Logger   // information log for e.g. response times
	tracelog            *log.Logger   // trace log for debugging
	maxRetries          int           // max. number of retries
	scheme              string        // http or https
	healthcheckEnabled  bool          // healthchecks enabled or disabled
	healthcheckInterval time.Duration // interval between healthchecks
	healthcheckStop     chan bool     // notify healthchecker to stop, and notify back
	snifferEnabled      bool          // sniffer enabled or disabled
	snifferTimeout      time.Duration // time the sniffer waits for a response from nodes info API
	snifferInterval     time.Duration // interval between sniffing
	snifferStop         chan bool     // notify sniffer to stop, and notify back
	decoder             Decoder       // used to decode data sent from Elasticsearch
}
+
+// NewClient creates a new client to work with Elasticsearch.
+//
+// The caller can configure the new client by passing configuration options
+// to the func.
+//
+// Example:
+//
+//   client, err := elastic.NewClient(
+//     elastic.SetURL("http://localhost:9200", "http://localhost:9201"),
+//     elastic.SetMaxRetries(10))
+//
+// If no URL is configured, Elastic uses DefaultURL by default.
+//
+// If the sniffer is enabled (the default), the new client then sniffes
+// the cluster via the Nodes Info API
+// (see http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/cluster-nodes-info.html#cluster-nodes-info).
+// It uses the URLs specified by the caller. The caller is responsible
+// to only pass a list of URLs of nodes that belong to the same cluster.
+// This sniffing process is run on startup and periodically.
+// Use SnifferInterval to set the interval between two sniffs (default is
+// 15 minutes). In other words: By default, the client will find new nodes
+// in the cluster and remove those that are no longer available every
+// 15 minutes. Disable the sniffer by passing SetSniff(false) to NewClient.
+//
+// The list of nodes found in the sniffing process will be used to make
+// connections to the REST API of Elasticsearch. These nodes are also
+// periodically checked in a shorter time frame. This process is called
+// a health check. By default, a health check is done every 60 seconds.
+// You can set a shorter or longer interval by SetHealthcheckInterval.
+// Disabling health checks is not recommended, but can be done by
+// SetHealthcheck(false).
+//
+// Connections are automatically marked as dead or healthy while
+// making requests to Elasticsearch. When a request fails, Elastic will
+// retry up to a maximum number of retries configured with SetMaxRetries.
+// Retries are disabled by default.
+//
+// If no HttpClient is configured, then http.DefaultClient is used.
+// You can use your own http.Client with some http.Transport for
+// advanced scenarios.
+//
+// An error is also returned when some configuration option is invalid or
+// the new client cannot sniff the cluster (if enabled).
func NewClient(options ...ClientOptionFunc) (*Client, error) {
	// Set up the client with defaults; the option funcs below may
	// override any of these values.
	c := &Client{
		urls:                []string{DefaultURL},
		c:                   http.DefaultClient,
		conns:               make([]*conn, 0),
		cindex:              -1,
		scheme:              DefaultScheme,
		decoder:             &DefaultDecoder{},
		maxRetries:          DefaultMaxRetries,
		healthcheckEnabled:  DefaultHealthcheckEnabled,
		healthcheckInterval: DefaultHealthcheckInterval,
		healthcheckStop:     make(chan bool),
		snifferEnabled:      DefaultSnifferEnabled,
		snifferInterval:     DefaultSnifferInterval,
		snifferStop:         make(chan bool),
		snifferTimeout:      DefaultSnifferTimeout,
	}

	// Run the options on it
	for _, option := range options {
		if err := option(c); err != nil {
			return nil, err
		}
	}

	// Normalize the URL list: fall back to the default, then strip
	// paths/queries/fragments and drop non-http(s) entries.
	if len(c.urls) == 0 {
		c.urls = []string{DefaultURL}
	}
	c.urls = canonicalize(c.urls...)

	if c.snifferEnabled {
		// Sniff the cluster initially
		if err := c.sniff(); err != nil {
			return nil, err
		}
	} else {
		// Do not sniff the cluster initially. Use the provided URLs instead.
		// No locking needed here: background goroutines are not started yet.
		for _, url := range c.urls {
			c.conns = append(c.conns, newConn(url, url))
		}
	}

	// Perform an initial health check.
	// NOTE(review): this is called even when healthcheckEnabled is
	// false — presumably healthcheck itself honors the flag; verify.
	c.healthcheck()

	go c.sniffer()       // periodically update cluster information
	go c.healthchecker() // start goroutine periodically ping all nodes of the cluster

	c.mu.Lock()
	c.running = true
	c.mu.Unlock()

	return c, nil
}
+
+// SetHttpClient can be used to specify the http.Client to use when making
+// HTTP requests to Elasticsearch.
+func SetHttpClient(httpClient *http.Client) ClientOptionFunc {
+	return func(c *Client) error {
+		if httpClient != nil {
+			c.c = httpClient
+		} else {
+			c.c = http.DefaultClient
+		}
+		return nil
+	}
+}
+
+// SetURL defines the URL endpoints of the Elasticsearch nodes. Notice that
+// when sniffing is enabled, these URLs are used to initially sniff the
+// cluster on startup.
+func SetURL(urls ...string) ClientOptionFunc {
+	return func(c *Client) error {
+		switch len(urls) {
+		case 0:
+			c.urls = []string{DefaultURL}
+		default:
+			c.urls = make([]string, 0)
+			for _, url := range urls {
+				c.urls = append(c.urls, url)
+			}
+		}
+		return nil
+	}
+}
+
// SetScheme sets the HTTP scheme to look for when sniffing
// ("http" or "https"). This is "http" by default (DefaultScheme).
func SetScheme(scheme string) ClientOptionFunc {
	return func(c *Client) error {
		c.scheme = scheme
		return nil
	}
}
+
// SetSniff enables or disables the sniffer (enabled by default).
func SetSniff(enabled bool) ClientOptionFunc {
	return func(c *Client) error {
		c.snifferEnabled = enabled
		return nil
	}
}
+
// SetSnifferInterval sets the interval between two sniffing processes.
// The default interval is 15 minutes (DefaultSnifferInterval).
func SetSnifferInterval(interval time.Duration) ClientOptionFunc {
	return func(c *Client) error {
		c.snifferInterval = interval
		return nil
	}
}
+
// SetSnifferTimeout sets the timeout for the sniffer that finds the
// nodes in a cluster. The default is 1 second (DefaultSnifferTimeout).
func SetSnifferTimeout(timeout time.Duration) ClientOptionFunc {
	return func(c *Client) error {
		c.snifferTimeout = timeout
		return nil
	}
}
+
// SetHealthcheck enables or disables healthchecks (enabled by default).
func SetHealthcheck(enabled bool) ClientOptionFunc {
	return func(c *Client) error {
		c.healthcheckEnabled = enabled
		return nil
	}
}
+
// SetHealthcheckInterval sets the interval between two health checks.
// The default interval is 60 seconds (DefaultHealthcheckInterval).
func SetHealthcheckInterval(interval time.Duration) ClientOptionFunc {
	return func(c *Client) error {
		c.healthcheckInterval = interval
		return nil
	}
}
+
+// SetMaxRetries sets the maximum number of retries before giving up when
+// performing a HTTP request to Elasticsearch.
+func SetMaxRetries(maxRetries int) func(*Client) error {
+	return func(c *Client) error {
+		if maxRetries < 0 {
+			return errors.New("MaxRetries must be greater than or equal to 0")
+		}
+		c.maxRetries = maxRetries
+		return nil
+	}
+}
+
+// SetDecoder sets the Decoder to use when decoding data from Elasticsearch.
+// DefaultDecoder is used by default.
+func SetDecoder(decoder Decoder) func(*Client) error {
+	return func(c *Client) error {
+		if decoder != nil {
+			c.decoder = decoder
+		} else {
+			c.decoder = &DefaultDecoder{}
+		}
+		return nil
+	}
+}
+
+// SetErrorLog sets the logger for critical messages like nodes joining
+// or leaving the cluster or failing requests. It is nil by default.
+func SetErrorLog(logger *log.Logger) func(*Client) error {
+	return func(c *Client) error {
+		c.errorlog = logger
+		return nil
+	}
+}
+
+// SetInfoLog sets the logger for informational messages, e.g. requests
+// and their response times. It is nil by default.
+func SetInfoLog(logger *log.Logger) func(*Client) error {
+	return func(c *Client) error {
+		c.infolog = logger
+		return nil
+	}
+}
+
+// SetTraceLog specifies the log.Logger to use for output of HTTP requests
+// and responses which is helpful during debugging. It is nil by default.
+func SetTraceLog(logger *log.Logger) func(*Client) error {
+	return func(c *Client) error {
+		c.tracelog = logger
+		return nil
+	}
+}
+
+// String returns a string representation of the client status.
+func (c *Client) String() string {
+	c.connsMu.Lock()
+	conns := c.conns
+	c.connsMu.Unlock()
+
+	var buf bytes.Buffer
+	for i, conn := range conns {
+		if i > 0 {
+			buf.WriteString(", ")
+		}
+		buf.WriteString(conn.String())
+	}
+	return buf.String()
+}
+
// IsRunning returns true if the background processes of the client
// are running, false otherwise. It is safe for concurrent use.
func (c *Client) IsRunning() bool {
	c.mu.RLock()
	defer c.mu.RUnlock()
	return c.running
}
+
+// Start starts the background processes like sniffing the cluster and
+// periodic health checks. You don't need to run Start when creating a
+// client with NewClient; the background processes are run by default.
+//
+// If the background processes are already running, this is a no-op.
+func (c *Client) Start() {
+	c.mu.RLock()
+	if c.running {
+		c.mu.RUnlock()
+		return
+	}
+	c.mu.RUnlock()
+
+	go c.sniffer()
+	go c.healthchecker()
+
+	c.mu.Lock()
+	c.running = true
+	c.mu.Unlock()
+
+	c.infof("elastic: client started")
+}
+
+// Stop stops the background processes that the client is running,
+// i.e. sniffing the cluster periodically and running health checks
+// on the nodes.
+//
+// If the background processes are not running, this is a no-op.
+func (c *Client) Stop() {
+	c.mu.RLock()
+	if !c.running {
+		c.mu.RUnlock()
+		return
+	}
+	c.mu.RUnlock()
+
+	c.healthcheckStop <- true
+	<-c.healthcheckStop
+
+	c.snifferStop <- true
+	<-c.snifferStop
+
+	c.mu.Lock()
+	c.running = false
+	c.mu.Unlock()
+
+	c.infof("elastic: client stopped")
+}
+
// errorf logs to the error log; it is a no-op when no error logger
// is configured (see SetErrorLog).
func (c *Client) errorf(format string, args ...interface{}) {
	if c.errorlog != nil {
		c.errorlog.Printf(format, args...)
	}
}
+
// infof logs informational messages; it is a no-op when no info
// logger is configured (see SetInfoLog).
func (c *Client) infof(format string, args ...interface{}) {
	if c.infolog != nil {
		c.infolog.Printf(format, args...)
	}
}
+
// tracef logs to the trace log; it is a no-op when no trace logger
// is configured (see SetTraceLog).
func (c *Client) tracef(format string, args ...interface{}) {
	if c.tracelog != nil {
		c.tracelog.Printf(format, args...)
	}
}
+
// dumpRequest dumps the given HTTP request (including its body) to
// the trace log. Dump errors are silently ignored.
func (c *Client) dumpRequest(r *http.Request) {
	if c.tracelog != nil {
		out, err := httputil.DumpRequestOut(r, true)
		if err == nil {
			c.tracef("%s\n", string(out))
		}
	}
}
+
// dumpResponse dumps the given HTTP response (including its body) to
// the trace log. Dump errors are silently ignored.
func (c *Client) dumpResponse(resp *http.Response) {
	if c.tracelog != nil {
		out, err := httputil.DumpResponse(resp, true)
		if err == nil {
			c.tracef("%s\n", string(out))
		}
	}
}
+
+// sniffer periodically runs sniff.
+func (c *Client) sniffer() {
+	for {
+		c.mu.RLock()
+		ticker := time.NewTicker(c.snifferInterval)
+		c.mu.RUnlock()
+
+		select {
+		case <-c.snifferStop:
+			// we are asked to stop, so we signal back that we're stopping now
+			c.snifferStop <- true
+			return
+		case <-ticker.C:
+			c.sniff()
+		}
+	}
+}
+
// sniff uses the Node Info API to return the list of nodes in the cluster.
// It uses the list of URLs passed on startup plus the list of URLs found
// by the preceding sniffing process (if sniffing is enabled).
//
// If sniffing is disabled, this is a no-op. It returns ErrNoClient
// when no URL is available or no node answers within snifferTimeout.
func (c *Client) sniff() error {
	c.mu.RLock()
	if !c.snifferEnabled {
		c.mu.RUnlock()
		return nil
	}

	// Use all available URLs provided to sniff the cluster.
	urlsMap := make(map[string]bool)
	urls := make([]string, 0)

	// Add all URLs provided on startup
	for _, url := range c.urls {
		urlsMap[url] = true
		urls = append(urls, url)
	}
	timeout := c.snifferTimeout
	c.mu.RUnlock()

	// Add all URLs found by sniffing (live connections only).
	c.connsMu.RLock()
	for _, conn := range c.conns {
		if !conn.IsDead() {
			url := conn.URL()
			if _, found := urlsMap[url]; !found {
				urls = append(urls, url)
			}
		}
	}
	c.connsMu.RUnlock()

	if len(urls) == 0 {
		return ErrNoClient
	}

	// Start sniffing on all found URLs concurrently; the channel is
	// buffered so late responders don't block after we return.
	ch := make(chan []*conn, len(urls))
	for _, url := range urls {
		go func(url string) { ch <- c.sniffNode(url) }(url)
	}

	// Wait for the results to come back, or the process times out.
	// The first non-empty result wins.
	// NOTE(review): time.After(timeout) is re-armed on every loop
	// iteration, so each empty result restarts the deadline — confirm
	// whether the timeout is meant to be overall rather than per
	// response.
	for {
		select {
		case conns := <-ch:
			if len(conns) > 0 {
				c.updateConns(conns)
				return nil
			}
		case <-time.After(timeout):
			// We get here if no cluster responds in time
			return ErrNoClient
		}
	}
}
+
// reSniffHostAndPort is used to extract hostname and port from an
// address as returned by the Nodes Info API
// (example: "inet[/127.0.0.1:9200]").
var reSniffHostAndPort = regexp.MustCompile(`\/([^:]*):([0-9]+)\]`)
+
// sniffNode sniffs a single node. This method is run as a goroutine
// in sniff. If successful, it returns the list of node URLs extracted
// from the result of calling Nodes Info API. Otherwise, an empty array
// is returned. All errors are swallowed deliberately: a failing node
// simply contributes no connections.
func (c *Client) sniffNode(url string) []*conn {
	nodes := make([]*conn, 0)

	// Call the Nodes Info API at /_nodes/http
	req, err := NewRequest("GET", url+"/_nodes/http")
	if err != nil {
		return nodes
	}

	res, err := c.c.Do((*http.Request)(req))
	if err != nil {
		return nodes
	}
	if res == nil {
		return nodes
	}

	if res.Body != nil {
		defer res.Body.Close()
	}

	var info NodesInfoResponse
	if err := json.NewDecoder(res.Body).Decode(&info); err == nil {
		if len(info.Nodes) > 0 {
			// Build node URLs from the address matching the client's
			// configured scheme (https vs. http).
			switch c.scheme {
			case "https":
				for nodeID, node := range info.Nodes {
					m := reSniffHostAndPort.FindStringSubmatch(node.HTTPSAddress)
					if len(m) == 3 {
						url := fmt.Sprintf("https://%s:%s", m[1], m[2])
						nodes = append(nodes, newConn(nodeID, url))
					}
				}
			default:
				for nodeID, node := range info.Nodes {
					m := reSniffHostAndPort.FindStringSubmatch(node.HTTPAddress)
					if len(m) == 3 {
						url := fmt.Sprintf("http://%s:%s", m[1], m[2])
						nodes = append(nodes, newConn(nodeID, url))
					}
				}
			}
		}
	}
	return nodes
}
+
+// updateConns updates the clients' connections with new information
+// gathered by a sniff operation. Connections whose node ID is already
+// known are carried over unchanged (keeping their failure counters and
+// dead state); newly seen nodes are added. Note that nodes absent from
+// conns are dropped entirely, and the round-robin cursor is reset so
+// the next call to next() starts at index 0.
+func (c *Client) updateConns(conns []*conn) {
+	c.connsMu.Lock()
+
+	newConns := make([]*conn, 0)
+
+	// Build up new connections:
+	// If we find an existing connection, use that (including no. of failures etc.).
+	// If we find a new connection, add it.
+	for _, conn := range conns {
+		var found bool
+		for _, oldConn := range c.conns {
+			if oldConn.NodeID() == conn.NodeID() {
+				// Take over the old connection
+				newConns = append(newConns, oldConn)
+				found = true
+				break
+			}
+		}
+		if !found {
+			// New connection didn't exist, so add it to our list of new conns.
+			c.errorf("elastic: %s joined the cluster", conn.URL())
+			newConns = append(newConns, conn)
+		}
+	}
+
+	c.conns = newConns
+	c.cindex = -1 // next() pre-increments, so -1 makes it start at 0
+	c.connsMu.Unlock()
+}
+
+// healthchecker periodically runs healthcheck until it is told to stop
+// via the healthcheckStop channel. The interval is re-read from the
+// client on every iteration so changes take effect; each iteration's
+// ticker is stopped explicitly — the previous code leaked one running
+// ticker per iteration because it never called Stop.
+func (c *Client) healthchecker() {
+	for {
+		c.mu.RLock()
+		ticker := time.NewTicker(c.healthcheckInterval)
+		c.mu.RUnlock()
+
+		select {
+		case <-c.healthcheckStop:
+			ticker.Stop()
+			// we are asked to stop, so we signal back that we're stopping now
+			c.healthcheckStop <- true
+			return
+		case <-ticker.C:
+			ticker.Stop()
+			c.healthcheck()
+		}
+	}
+}
+
+// healthcheck does a health check on all nodes in the cluster. Depending on
+// the node state, it marks connections as dead, sets them alive etc.
+// If healthchecks are disabled, this is a no-op.
+func (c *Client) healthcheck() {
+	c.mu.RLock()
+	if !c.healthcheckEnabled {
+		c.mu.RUnlock()
+		return
+	}
+	c.mu.RUnlock()
+
+	// Snapshot the connection list so we don't hold the lock during I/O.
+	c.connsMu.RLock()
+	conns := c.conns
+	c.connsMu.RUnlock()
+
+	for _, conn := range conns {
+		params := make(url.Values)
+		params.Set("timeout", "1")
+		req, err := NewRequest("HEAD", conn.URL()+"/?"+params.Encode())
+		if err != nil {
+			c.errorf("elastic: %s is dead", conn.URL())
+			conn.MarkAsDead()
+			continue
+		}
+		res, err := c.c.Do((*http.Request)(req))
+		if err != nil {
+			c.errorf("elastic: %s is dead", conn.URL())
+			conn.MarkAsDead()
+			continue
+		}
+		// Close the body immediately: the previous code used defer inside
+		// this loop, which kept every response open until the function
+		// returned (one open body per node being checked).
+		if res.Body != nil {
+			res.Body.Close()
+		}
+		if res.StatusCode >= 200 && res.StatusCode < 300 {
+			conn.MarkAsAlive()
+		} else {
+			conn.MarkAsDead()
+			c.errorf("elastic: %s is dead [status=%d]", conn.URL(), res.StatusCode)
+		}
+	}
+}
+
+// next returns the next live connection in round-robin order, or
+// ErrNoClient when every known connection is marked dead.
+func (c *Client) next() (*conn, error) {
+	// Simple round-robin selection.
+	// TODO: This should be a pluggable strategy, like the Selector in the official clients.
+	c.connsMu.Lock()
+	defer c.connsMu.Unlock()
+
+	// Visit each connection at most once; stop after a full cycle.
+	numConns := len(c.conns)
+	for visited := 0; visited < numConns; visited++ {
+		c.cindex++
+		if c.cindex >= numConns {
+			c.cindex = 0
+		}
+		if candidate := c.conns[c.cindex]; !candidate.IsDead() {
+			return candidate, nil
+		}
+	}
+
+	// TODO: As a last resort, we could try to awake a dead connection here.
+
+	// We tried hard, but there is no node available
+	return nil, ErrNoClient
+}
+
+// PerformRequest does a HTTP request to Elasticsearch. It picks a
+// connection via next(), retries on connection/transport/response
+// errors up to maxRetries times with simple doubling back-off, and
+// returns the decoded response or an error on failure.
+func (c *Client) PerformRequest(method, path string, params url.Values, body interface{}) (*Response, error) {
+	start := time.Now().UTC()
+
+	c.mu.RLock()
+	retries := c.maxRetries
+	c.mu.RUnlock()
+
+	var err error
+	var conn *conn
+	var req *Request
+	var resp *Response
+	var retried bool
+
+	// We wait between retries, using simple exponential back-off.
+	// TODO: Make this configurable, including the jitter.
+	retryWaitMsec := int64(100 + (rand.Intn(20) - 10))
+
+	for {
+		pathWithParams := path
+		if len(params) > 0 {
+			pathWithParams += "?" + params.Encode()
+		}
+
+		// Get a connection
+		conn, err = c.next()
+		if err == ErrNoClient {
+			if !retried {
+				// Force a healthcheck as all connections seem to be dead.
+				c.healthcheck()
+			}
+			retries -= 1
+			if retries <= 0 {
+				return nil, err
+			}
+			retried = true
+			time.Sleep(time.Duration(retryWaitMsec) * time.Millisecond)
+			retryWaitMsec += retryWaitMsec
+			continue // try again
+		}
+		if err != nil {
+			c.errorf("elastic: cannot get connection from pool")
+			return nil, err
+		}
+
+		req, err = NewRequest(method, conn.URL()+pathWithParams)
+		if err != nil {
+			c.errorf("elastic: cannot create request for %s %s: %v", strings.ToUpper(method), conn.URL()+pathWithParams, err)
+			return nil, err
+		}
+
+		// Set body: a string is sent verbatim, anything else is JSON-encoded.
+		// (Go switch cases do not fall through; the old explicit breaks were
+		// redundant.)
+		if body != nil {
+			switch b := body.(type) {
+			case string:
+				req.SetBodyString(b)
+			default:
+				req.SetBodyJson(body)
+			}
+		}
+
+		// Tracing
+		c.dumpRequest((*http.Request)(req))
+
+		// Get response
+		res, err := c.c.Do((*http.Request)(req))
+		if err != nil {
+			retries -= 1
+			if retries <= 0 {
+				c.errorf("elastic: %s is dead", conn.URL())
+				conn.MarkAsDead()
+				return nil, err
+			}
+			retried = true
+			time.Sleep(time.Duration(retryWaitMsec) * time.Millisecond)
+			retryWaitMsec += retryWaitMsec
+			continue // try again
+		}
+
+		// Check for errors
+		if err := checkResponse(res); err != nil {
+			// Close the body before retrying: the old deferred close
+			// inside this loop kept one response open per retry until
+			// the function returned.
+			if res.Body != nil {
+				res.Body.Close()
+			}
+			retries -= 1
+			if retries <= 0 {
+				return nil, err
+			}
+			retried = true
+			time.Sleep(time.Duration(retryWaitMsec) * time.Millisecond)
+			retryWaitMsec += retryWaitMsec
+			continue // try again
+		}
+
+		// Tracing
+		c.dumpResponse(res)
+
+		// We successfully made a request with this connection
+		conn.MarkAsHealthy()
+
+		// newResponse consumes the body (Response.Body is raw bytes, as
+		// callers pass it to json.Unmarshal), so it can be closed here.
+		resp, err = c.newResponse(res)
+		if res.Body != nil {
+			res.Body.Close()
+		}
+		if err != nil {
+			return nil, err
+		}
+
+		break
+	}
+
+	duration := time.Now().UTC().Sub(start)
+	c.infof("%s %s [status:%d, request:%.3fs]",
+		strings.ToUpper(method),
+		req.URL,
+		resp.StatusCode,
+		float64(int64(duration/time.Millisecond))/1000)
+
+	return resp, nil
+}
+
+// ElasticsearchVersion returns the version number reported by the
+// Elasticsearch server listening at the given URL.
+func (c *Client) ElasticsearchVersion(url string) (string, error) {
+	ping, _, err := c.Ping().URL(url).Do()
+	if err != nil {
+		return "", err
+	}
+	return ping.Version.Number, nil
+}
+
+// IndexNames returns the names of all indices in the cluster.
+func (c *Client) IndexNames() ([]string, error) {
+	res, err := c.IndexGetSettings().Index("_all").Do()
+	if err != nil {
+		return nil, err
+	}
+	var names []string
+	for name, _ := range res {
+		names = append(names, name)
+	}
+	return names, nil
+}
+
+// Ping returns a service that checks if a given node in a cluster exists
+// and (optionally) returns some basic information about the Elasticsearch
+// server, e.g. the Elasticsearch version number.
+func (c *Client) Ping() *PingService {
+	return NewPingService(c)
+}
+
+// CreateIndex returns a service to create a new index with the given name.
+func (c *Client) CreateIndex(name string) *CreateIndexService {
+	svc := NewCreateIndexService(c)
+	svc.Index(name)
+	return svc
+}
+
+// DeleteIndex returns a service to delete the index with the given name.
+func (c *Client) DeleteIndex(name string) *DeleteIndexService {
+	svc := NewDeleteIndexService(c)
+	svc.Index(name)
+	return svc
+}
+
+// IndexExists returns a service to check whether the named index exists.
+func (c *Client) IndexExists(name string) *IndexExistsService {
+	svc := NewIndexExistsService(c)
+	svc.Index(name)
+	return svc
+}
+
+// TypeExists returns a service to check if one or more types exist in
+// one or more indices.
+func (c *Client) TypeExists() *IndicesExistsTypeService {
+	return NewIndicesExistsTypeService(c)
+}
+
+// IndexStats returns a service that provides statistics on different
+// operations happening in one or more indices.
+func (c *Client) IndexStats(indices ...string) *IndicesStatsService {
+	return NewIndicesStatsService(c).Index(indices...)
+}
+
+// OpenIndex returns a service to open the index with the given name.
+func (c *Client) OpenIndex(name string) *OpenIndexService {
+	svc := NewOpenIndexService(c)
+	svc.Index(name)
+	return svc
+}
+
+// CloseIndex returns a service to close the index with the given name.
+func (c *Client) CloseIndex(name string) *CloseIndexService {
+	svc := NewCloseIndexService(c)
+	svc.Index(name)
+	return svc
+}
+
+// Index returns a service to index (store) a document.
+func (c *Client) Index() *IndexService {
+	return NewIndexService(c)
+}
+
+// IndexGet returns a service that retrieves information about one or
+// more indices. IndexGet is only available for Elasticsearch 1.4 or later.
+func (c *Client) IndexGet() *IndicesGetService {
+	return NewIndicesGetService(c)
+}
+
+// IndexGetSettings returns a service that retrieves settings of one or
+// more indices.
+func (c *Client) IndexGetSettings() *IndicesGetSettingsService {
+	return NewIndicesGetSettingsService(c)
+}
+
+// Update returns a service to update a document.
+func (c *Client) Update() *UpdateService {
+	return NewUpdateService(c)
+}
+
+// Delete returns a service to delete a document.
+func (c *Client) Delete() *DeleteService {
+	return NewDeleteService(c)
+}
+
+// DeleteByQuery returns a service that deletes documents matching a query.
+func (c *Client) DeleteByQuery() *DeleteByQueryService {
+	return NewDeleteByQueryService(c)
+}
+
+// Get returns a service to retrieve a document.
+func (c *Client) Get() *GetService {
+	return NewGetService(c)
+}
+
+// MultiGet returns a service that retrieves multiple documents in one roundtrip.
+func (c *Client) MultiGet() *MultiGetService {
+	return NewMultiGetService(c)
+}
+
+// Exists returns a service that checks if a document exists.
+func (c *Client) Exists() *ExistsService {
+	return NewExistsService(c)
+}
+
+// Count returns a service to count documents in the given indices.
+func (c *Client) Count(indices ...string) *CountService {
+	svc := NewCountService(c)
+	svc.Indices(indices...)
+	return svc
+}
+
+// Search is the entry point for searches in the given indices.
+func (c *Client) Search(indices ...string) *SearchService {
+	svc := NewSearchService(c)
+	svc.Indices(indices...)
+	return svc
+}
+
+// Percolate returns a service that sends a document and returns matching queries.
+// See http://www.elastic.co/guide/en/elasticsearch/reference/current/search-percolate.html.
+func (c *Client) Percolate() *PercolateService {
+	return NewPercolateService(c)
+}
+
+// MultiSearch is the entry point for multi searches, i.e. running
+// several searches in a single roundtrip.
+func (c *Client) MultiSearch() *MultiSearchService {
+	return NewMultiSearchService(c)
+}
+
+// Suggest returns a service to return suggestions for the given indices.
+func (c *Client) Suggest(indices ...string) *SuggestService {
+	svc := NewSuggestService(c)
+	svc.Indices(indices...)
+	return svc
+}
+
+// Scan returns a service to iterate through documents server-side.
+// Use this inside a server process where the results will be processed
+// without returning them to a client.
+func (c *Client) Scan(indices ...string) *ScanService {
+	svc := NewScanService(c)
+	svc.Indices(indices...)
+	return svc
+}
+
+// Scroll returns a service to efficiently scroll through results while
+// returning them to a client. Use Scan when you don't need to return
+// requests to a client (i.e. not paginating via request/response).
+func (c *Client) Scroll(indices ...string) *ScrollService {
+	svc := NewScrollService(c)
+	svc.Indices(indices...)
+	return svc
+}
+
+// ClearScroll returns a service to clear search contexts manually.
+func (c *Client) ClearScroll() *ClearScrollService {
+	return NewClearScrollService(c)
+}
+
+// Optimize returns a service that asks Elasticsearch to optimize one
+// or more indices.
+func (c *Client) Optimize(indices ...string) *OptimizeService {
+	svc := NewOptimizeService(c)
+	svc.Indices(indices...)
+	return svc
+}
+
+// Refresh returns a service that asks Elasticsearch to refresh one or
+// more indices.
+func (c *Client) Refresh(indices ...string) *RefreshService {
+	svc := NewRefreshService(c)
+	svc.Indices(indices...)
+	return svc
+}
+
+// Flush returns a service that asks Elasticsearch to free memory from
+// the index and flush data to disk.
+func (c *Client) Flush() *FlushService {
+	return NewFlushService(c)
+}
+
+// Explain returns a service that computes a score explanation for a
+// query and a specific document.
+func (c *Client) Explain(index, typ, id string) *ExplainService {
+	return NewExplainService(c).Index(index).Type(typ).Id(id)
+}
+
+// Bulk is the entry point to mass insert/update/delete documents.
+func (c *Client) Bulk() *BulkService {
+	return NewBulkService(c)
+}
+
+// Alias returns a service that enables the caller to add and/or remove aliases.
+func (c *Client) Alias() *AliasService {
+	return NewAliasService(c)
+}
+
+// Aliases returns a service that lists aliases by index name(s).
+func (c *Client) Aliases() *AliasesService {
+	return NewAliasesService(c)
+}
+
+// GetTemplate returns a service to retrieve a search template.
+// Use IndexXXXTemplate funcs to manage index templates.
+func (c *Client) GetTemplate() *GetTemplateService {
+	return NewGetTemplateService(c)
+}
+
+// PutTemplate returns a service to create or update a search template.
+// Use IndexXXXTemplate funcs to manage index templates.
+func (c *Client) PutTemplate() *PutTemplateService {
+	return NewPutTemplateService(c)
+}
+
+// DeleteTemplate returns a service to delete a search template.
+// Use IndexXXXTemplate funcs to manage index templates.
+func (c *Client) DeleteTemplate() *DeleteTemplateService {
+	return NewDeleteTemplateService(c)
+}
+
+// IndexGetTemplate returns a service to retrieve one or more index templates.
+// Use XXXTemplate funcs to manage search templates.
+func (c *Client) IndexGetTemplate(names ...string) *IndicesGetTemplateService {
+	return NewIndicesGetTemplateService(c).Name(names...)
+}
+
+// IndexTemplateExists returns a service that checks if the named index
+// template exists. Use XXXTemplate funcs to manage search templates.
+func (c *Client) IndexTemplateExists(name string) *IndicesExistsTemplateService {
+	return NewIndicesExistsTemplateService(c).Name(name)
+}
+
+// IndexPutTemplate returns a service to create or update the named index
+// template. Use XXXTemplate funcs to manage search templates.
+func (c *Client) IndexPutTemplate(name string) *IndicesPutTemplateService {
+	return NewIndicesPutTemplateService(c).Name(name)
+}
+
+// IndexDeleteTemplate returns a service to delete the named index
+// template. Use XXXTemplate funcs to manage search templates.
+func (c *Client) IndexDeleteTemplate(name string) *IndicesDeleteTemplateService {
+	return NewIndicesDeleteTemplateService(c).Name(name)
+}
+
+// GetMapping returns a service to retrieve a mapping.
+func (c *Client) GetMapping() *GetMappingService {
+	return NewGetMappingService(c)
+}
+
+// PutMapping returns a service to register a mapping.
+func (c *Client) PutMapping() *PutMappingService {
+	return NewPutMappingService(c)
+}
+
+// DeleteMapping returns a service to delete a mapping.
+func (c *Client) DeleteMapping() *DeleteMappingService {
+	return NewDeleteMappingService(c)
+}
+
+// ClusterHealth returns a service that retrieves the health of the cluster.
+func (c *Client) ClusterHealth() *ClusterHealthService {
+	return NewClusterHealthService(c)
+}
+
+// ClusterState returns a service that retrieves the state of the cluster.
+func (c *Client) ClusterState() *ClusterStateService {
+	return NewClusterStateService(c)
+}
+
+// NodesInfo returns a service that retrieves information about one,
+// several, or all of the cluster's nodes.
+func (c *Client) NodesInfo() *NodesInfoService {
+	return NewNodesInfoService(c)
+}
+
+// Reindex returns a service that will reindex documents from a source
+// index into a target index. See
+// http://www.elastic.co/guide/en/elasticsearch/guide/current/reindex.html
+// for more information about reindexing.
+func (c *Client) Reindex(sourceIndex, targetIndex string) *Reindexer {
+	return NewReindexer(c, sourceIndex, targetIndex)
+}

+ 63 - 0
github.com/olivere/elastic/cluster-test/README.md

@@ -0,0 +1,63 @@
+# Cluster Test
+
+This directory contains a program you can use to test a cluster.
+
+Here's how:
+
+First, install a cluster of Elasticsearch nodes. You can install them on
+different computers, or start several nodes on a single machine.
+
+Build cluster-test by `go build cluster-test.go` (or build with `make`).
+
+Run `./cluster-test -h` to get a list of flags:
+
+```sh
+$ ./cluster-test -h
+Usage of ./cluster-test:
+  -errorlog="": error log file
+  -healthcheck=true: enable or disable healthchecks
+  -healthchecker=1m0s: healthcheck interval
+  -index="twitter": name of ES index to use
+  -infolog="": info log file
+  -n=5: number of goroutines that run searches
+  -nodes="": comma-separated list of ES URLs (e.g. 'http://192.168.2.10:9200,http://192.168.2.11:9200')
+  -retries=0: number of retries
+  -sniff=true: enable or disable sniffer
+  -sniffer=15m0s: sniffer interval
+  -tracelog="": trace log file
+```
+
+Example:
+
+```sh
+$ ./cluster-test -nodes=http://127.0.0.1:9200,http://127.0.0.1:9201,http://127.0.0.1:9202 -n=5 -index=twitter -retries=5 -sniff=true -sniffer=10s -healthcheck=true -healthchecker=5s -errorlog=error.log
+```
+
+The above example will create an index and start some search jobs on the
+cluster defined by http://127.0.0.1:9200, http://127.0.0.1:9201,
+and http://127.0.0.1:9202.
+
+* It will create an index called `twitter` on the cluster (`-index=twitter`)
+* It will run 5 search jobs in parallel (`-n=5`).
+* It will retry failed requests 5 times (`-retries=5`).
+* It will sniff the cluster periodically (`-sniff=true`).
+* It will sniff the cluster every 10 seconds (`-sniffer=10s`).
+* It will perform health checks periodically (`-healthcheck=true`).
+* It will perform health checks on the nodes every 5 seconds (`-healthchecker=5s`).
+* It will write an error log file (`-errorlog=error.log`).
+
+If you want to test Elastic with nodes going up and down, you can use a
+chaos monkey script like this and run it on the nodes of your cluster:
+
+```sh
+#!/bin/bash
+while true
+do
+	echo "Starting ES node"
+	elasticsearch -d -Xmx4g -Xms1g -Des.config=elasticsearch.yml -p es.pid
+	sleep `jot -r 1 10 300` # wait for 10-300s
+	echo "Stopping ES node"
+	kill -TERM `cat es.pid`
+	sleep `jot -r 1 10 60`  # wait for 10-60s
+done
+```

+ 185 - 0
github.com/olivere/elastic/cluster_health.go

@@ -0,0 +1,185 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+	"strings"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
+// ClusterHealthService allows to get the status of the cluster.
+// It is documented at http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/cluster-health.html.
+// Each field below (except client/pretty/indices) maps 1:1 onto a query
+// string parameter; see buildURL for the exact parameter names.
+type ClusterHealthService struct {
+	client                  *Client
+	pretty                  bool
+	indices                 []string // indices to restrict the report to; empty means whole cluster
+	waitForStatus           string
+	level                   string
+	local                   *bool
+	masterTimeout           string
+	timeout                 string
+	waitForActiveShards     *int
+	waitForNodes            string
+	waitForRelocatingShards *int
+}
+
+// NewClusterHealthService creates a new ClusterHealthService for the client.
+func NewClusterHealthService(client *Client) *ClusterHealthService {
+	return &ClusterHealthService{
+		client:  client,
+		indices: make([]string, 0),
+	}
+}
+
+// Index limits the information returned to a single index,
+// replacing any previously configured indices.
+func (s *ClusterHealthService) Index(index string) *ClusterHealthService {
+	s.indices = []string{index}
+	return s
+}
+
+// Indices limits the information returned to the given indices,
+// replacing any previously configured indices.
+func (s *ClusterHealthService) Indices(indices ...string) *ClusterHealthService {
+	s.indices = append([]string{}, indices...)
+	return s
+}
+
+// MasterTimeout specifies an explicit operation timeout for connection
+// to the master node. Sent as the master_timeout query parameter.
+func (s *ClusterHealthService) MasterTimeout(masterTimeout string) *ClusterHealthService {
+	s.masterTimeout = masterTimeout
+	return s
+}
+
+// Timeout specifies an explicit operation timeout.
+// Sent as the timeout query parameter.
+func (s *ClusterHealthService) Timeout(timeout string) *ClusterHealthService {
+	s.timeout = timeout
+	return s
+}
+
+// WaitForActiveShards can be used to wait until the specified number of
+// shards are active. Sent as the wait_for_active_shards query parameter.
+func (s *ClusterHealthService) WaitForActiveShards(waitForActiveShards int) *ClusterHealthService {
+	s.waitForActiveShards = &waitForActiveShards
+	return s
+}
+
+// WaitForNodes can be used to wait until the specified number of nodes
+// are available. Sent as the wait_for_nodes query parameter.
+func (s *ClusterHealthService) WaitForNodes(waitForNodes string) *ClusterHealthService {
+	s.waitForNodes = waitForNodes
+	return s
+}
+
+// WaitForRelocatingShards can be used to wait until the specified number
+// of relocating shards is finished. Sent as the wait_for_relocating_shards
+// query parameter.
+func (s *ClusterHealthService) WaitForRelocatingShards(waitForRelocatingShards int) *ClusterHealthService {
+	s.waitForRelocatingShards = &waitForRelocatingShards
+	return s
+}
+
+// WaitForStatus can be used to wait until the cluster is in a specific state.
+// Valid values are: green, yellow, or red. The value is not validated
+// client-side; it is passed through as the wait_for_status query parameter.
+func (s *ClusterHealthService) WaitForStatus(waitForStatus string) *ClusterHealthService {
+	s.waitForStatus = waitForStatus
+	return s
+}
+
+// Level specifies the level of detail for returned information.
+// The value is passed through unvalidated as the level query parameter.
+func (s *ClusterHealthService) Level(level string) *ClusterHealthService {
+	s.level = level
+	return s
+}
+
+// Local indicates whether to return local information. If it is true,
+// we do not retrieve the state from master node (default: false).
+// Sent as the local query parameter.
+func (s *ClusterHealthService) Local(local bool) *ClusterHealthService {
+	s.local = &local
+	return s
+}
+
+// buildURL constructs the endpoint path and the query string parameters
+// for the cluster health request. Only explicitly configured options
+// end up in the query string.
+func (s *ClusterHealthService) buildURL() (string, url.Values, error) {
+	path, err := uritemplates.Expand("/_cluster/health/{index}", map[string]string{
+		"index": strings.Join(s.indices, ","),
+	})
+	if err != nil {
+		return "", url.Values{}, err
+	}
+
+	params := url.Values{}
+	if s.level != "" {
+		params.Set("level", s.level)
+	}
+	if s.local != nil {
+		params.Set("local", fmt.Sprintf("%v", *s.local))
+	}
+	if s.masterTimeout != "" {
+		params.Set("master_timeout", s.masterTimeout)
+	}
+	if s.timeout != "" {
+		params.Set("timeout", s.timeout)
+	}
+	if s.waitForActiveShards != nil {
+		params.Set("wait_for_active_shards", fmt.Sprintf("%d", *s.waitForActiveShards))
+	}
+	if s.waitForNodes != "" {
+		params.Set("wait_for_nodes", s.waitForNodes)
+	}
+	if s.waitForRelocatingShards != nil {
+		params.Set("wait_for_relocating_shards", fmt.Sprintf("%d", *s.waitForRelocatingShards))
+	}
+	if s.waitForStatus != "" {
+		params.Set("wait_for_status", s.waitForStatus)
+	}
+	return path, params, nil
+}
+
+// Validate checks if the operation is valid. The cluster health request
+// has no required parameters, so this always succeeds.
+func (s *ClusterHealthService) Validate() error {
+	return nil
+}
+
+// Do executes the cluster health request: it validates the parameters,
+// builds the URL, performs the HTTP GET, and decodes the JSON reply.
+func (s *ClusterHealthService) Do() (*ClusterHealthResponse, error) {
+	if err := s.Validate(); err != nil {
+		return nil, err
+	}
+
+	path, params, err := s.buildURL()
+	if err != nil {
+		return nil, err
+	}
+
+	res, err := s.client.PerformRequest("GET", path, params, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	ret := new(ClusterHealthResponse)
+	if err := json.Unmarshal(res.Body, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// ClusterHealthResponse is the response of ClusterHealthService.Do.
+type ClusterHealthResponse struct {
+	ClusterName         string `json:"cluster_name"`
+	Status              string `json:"status"`
+	TimedOut            bool   `json:"timed_out"`
+	NumberOfNodes       int    `json:"number_of_nodes"`
+	NumberOfDataNodes   int    `json:"number_of_data_nodes"`
+	ActivePrimaryShards int    `json:"active_primary_shards"`
+	ActiveShards        int    `json:"active_shards"`
+	RelocatingShards    int    `json:"relocating_shards"`
+	// Elasticsearch reports this value as "initializing_shards"; the
+	// previous tag "initialized_shards" matched nothing, so the field
+	// was always zero. The Go field name is kept for compatibility.
+	InitializedShards int `json:"initializing_shards"`
+	UnassignedShards  int `json:"unassigned_shards"`
+}

+ 192 - 0
github.com/olivere/elastic/cluster_state.go

@@ -0,0 +1,192 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+	"strings"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
+// ClusterStateService returns the state of the cluster.
+// It is documented at http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/cluster-state.html.
+type ClusterStateService struct {
+	client        *Client
+	pretty        bool
+	indices       []string // indices to restrict the state to; empty expands to "_all"
+	metrics       []string // metrics to include; empty expands to "_all"
+	local         *bool
+	masterTimeout string
+	flatSettings  *bool
+}
+
+// NewClusterStateService creates a new ClusterStateService for the client.
+func NewClusterStateService(client *Client) *ClusterStateService {
+	svc := &ClusterStateService{client: client}
+	svc.indices = make([]string, 0)
+	svc.metrics = make([]string, 0)
+	return svc
+}
+
+// Index sets the name of a single index. Use _all or an empty string
+// to perform the operation on all indices.
+func (s *ClusterStateService) Index(index string) *ClusterStateService {
+	s.indices = []string{index}
+	return s
+}
+
+// Indices sets the list of index names, replacing any previous setting.
+// Use _all or an empty list to perform the operation on all indices.
+func (s *ClusterStateService) Indices(indices ...string) *ClusterStateService {
+	s.indices = append([]string{}, indices...)
+	return s
+}
+
+// Metric limits the information returned to a single metric.
+// It can be one of: version, master_node, nodes, routing_table, metadata,
+// blocks, or customs.
+func (s *ClusterStateService) Metric(metric string) *ClusterStateService {
+	s.metrics = []string{metric}
+	return s
+}
+
+// Metrics limits the information returned to the specified metrics,
+// replacing any previous setting. It can be any of: version, master_node,
+// nodes, routing_table, metadata, blocks, or customs.
+func (s *ClusterStateService) Metrics(metrics ...string) *ClusterStateService {
+	s.metrics = append([]string{}, metrics...)
+	return s
+}
+
+// Local indicates whether to return local information. If it is true,
+// we do not retrieve the state from master node (default: false).
+// Sent as the local query parameter.
+func (s *ClusterStateService) Local(local bool) *ClusterStateService {
+	s.local = &local
+	return s
+}
+
+// MasterTimeout specifies the timeout for connection to master.
+// Sent as the master_timeout query parameter.
+func (s *ClusterStateService) MasterTimeout(masterTimeout string) *ClusterStateService {
+	s.masterTimeout = masterTimeout
+	return s
+}
+
+// FlatSettings indicates whether to return settings in flat format
+// (default: false). Sent as the flat_settings query parameter.
+func (s *ClusterStateService) FlatSettings(flatSettings bool) *ClusterStateService {
+	s.flatSettings = &flatSettings
+	return s
+}
+
+// buildURL builds the URL for the operation. Empty metrics or indices
+// default to "_all", so the path is always fully populated
+// (/_cluster/state/{metrics}/{indices}).
+func (s *ClusterStateService) buildURL() (string, url.Values, error) {
+	// Build URL
+	metrics := strings.Join(s.metrics, ",")
+	if metrics == "" {
+		metrics = "_all"
+	}
+	indices := strings.Join(s.indices, ",")
+	if indices == "" {
+		indices = "_all"
+	}
+	path, err := uritemplates.Expand("/_cluster/state/{metrics}/{indices}", map[string]string{
+		"metrics": metrics,
+		"indices": indices,
+	})
+	if err != nil {
+		return "", url.Values{}, err
+	}
+
+	// Add query string parameters (only explicitly configured ones)
+	params := url.Values{}
+	if s.masterTimeout != "" {
+		params.Set("master_timeout", s.masterTimeout)
+	}
+	if s.flatSettings != nil {
+		params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
+	}
+	if s.local != nil {
+		params.Set("local", fmt.Sprintf("%v", *s.local))
+	}
+
+	return path, params, nil
+}
+
+// Validate checks if the operation is valid. The cluster state request
+// has no required parameters, so this always succeeds.
+func (s *ClusterStateService) Validate() error {
+	return nil
+}
+
+// Do executes the cluster state request: it validates the parameters,
+// builds the URL, performs the HTTP GET, and decodes the JSON reply.
+func (s *ClusterStateService) Do() (*ClusterStateResponse, error) {
+	if err := s.Validate(); err != nil {
+		return nil, err
+	}
+
+	path, params, err := s.buildURL()
+	if err != nil {
+		return nil, err
+	}
+
+	res, err := s.client.PerformRequest("GET", path, params, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	resp := new(ClusterStateResponse)
+	if err := json.Unmarshal(res.Body, resp); err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// ClusterStateResponse is the response of ClusterStateService.Do.
+// Several parts of the payload are kept as generic maps/slices
+// (map[string]interface{}, []interface{}) and must be inspected
+// dynamically by the caller.
+type ClusterStateResponse struct {
+	ClusterName  string                               `json:"cluster_name"`
+	Version      int                                  `json:"version"`
+	MasterNode   string                               `json:"master_node"`
+	Blocks       map[string]interface{}               `json:"blocks"`
+	Nodes        map[string]*ClusterStateNode         `json:"nodes"`
+	Metadata     *ClusterStateMetadata                `json:"metadata"`
+	RoutingTable map[string]*ClusterStateRoutingTable `json:"routing_table"`
+	RoutingNodes *ClusterStateRoutingNode             `json:"routing_nodes"`
+	Allocations  []interface{}                        `json:"allocations"`
+	Customs      map[string]interface{}               `json:"customs"`
+}
+
+// ClusterStateMetadata holds the metadata section of a cluster state
+// response; the nested values are left undecoded.
+type ClusterStateMetadata struct {
+	Templates    map[string]interface{} `json:"templates"`
+	Indices      map[string]interface{} `json:"indices"`
+	Repositories map[string]interface{} `json:"repositories"`
+}
+
+// ClusterStateNode is an entry in the "nodes" section of a cluster
+// state response.
+// NOTE(review): these fields (state/primary/shard/index) look like a
+// shard-routing entry rather than a node description — verify against
+// an actual /_cluster/state payload.
+type ClusterStateNode struct {
+	State          string  `json:"state"`
+	Primary        bool    `json:"primary"`
+	Node           string  `json:"node"`
+	RelocatingNode *string `json:"relocating_node"`
+	Shard          int     `json:"shard"`
+	Index          string  `json:"index"`
+}
+
+// ClusterStateRoutingTable holds the routing table for a set of indices;
+// the per-index routing data is left undecoded.
+type ClusterStateRoutingTable struct {
+	Indices map[string]interface{} `json:"indices"`
+}
+
+// ClusterStateRoutingNode holds the "routing_nodes" section of a cluster
+// state response: unassigned shards plus per-node routing data (undecoded).
+type ClusterStateRoutingNode struct {
+	Unassigned []interface{}          `json:"unassigned"`
+	Nodes      map[string]interface{} `json:"nodes"`
+}

+ 90 - 0
github.com/olivere/elastic/connection.go

@@ -0,0 +1,90 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"fmt"
+	"sync"
+	"time"
+)
+
// conn represents a single connection to a node in a cluster.
// All mutable state (failure count, dead flag, dead-since timestamp)
// is guarded by the embedded RWMutex.
type conn struct {
	sync.RWMutex
	nodeID    string // node ID
	url       string
	failures  int        // failed requests since last MarkAsHealthy
	dead      bool       // true while excluded from selection
	deadSince *time.Time // UTC time of the first failure; nil while healthy
}

// newConn creates a new connection to the given URL.
// The connection starts out alive with zero failures.
func newConn(nodeID, url string) *conn {
	c := &conn{
		nodeID: nodeID,
		url:    url,
	}
	return c
}
+
+// String returns a representation of the connection status.
+func (c *conn) String() string {
+	c.RLock()
+	defer c.RUnlock()
+	return fmt.Sprintf("%s [dead=%v,failures=%d,deadSince=%v]", c.url, c.dead, c.failures, c.deadSince)
+}
+
+// NodeID returns the ID of the node of this connection.
+func (c *conn) NodeID() string {
+	c.RLock()
+	defer c.RUnlock()
+	return c.nodeID
+}
+
+// URL returns the URL of this connection.
+func (c *conn) URL() string {
+	c.RLock()
+	defer c.RUnlock()
+	return c.url
+}
+
+// IsDead returns true if this connection is marked as dead, i.e. a previous
+// request to the URL has been unsuccessful.
+func (c *conn) IsDead() bool {
+	c.RLock()
+	defer c.RUnlock()
+	return c.dead
+}
+
+// MarkAsDead marks this connection as dead, increments the failures
+// counter and stores the current time in dead since.
+func (c *conn) MarkAsDead() {
+	c.Lock()
+	c.dead = true
+	if c.deadSince == nil {
+		utcNow := time.Now().UTC()
+		c.deadSince = &utcNow
+	}
+	c.failures += 1
+	c.Unlock()
+}
+
+// MarkAsAlive marks this connection as eligible to be returned from the
+// pool of connections by the selector.
+func (c *conn) MarkAsAlive() {
+	c.Lock()
+	c.dead = false
+	c.Unlock()
+}
+
+// MarkAsHealthy marks this connection as healthy, i.e. a request has been
+// successfully performed with it.
+func (c *conn) MarkAsHealthy() {
+	c.Lock()
+	c.dead = false
+	c.deadSince = nil
+	c.failures = 0
+	c.Unlock()
+}

+ 152 - 0
github.com/olivere/elastic/count.go

@@ -0,0 +1,152 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+	"strings"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
// CountService is a convenient service for determining the
// number of documents in an index. Use SearchService with
// a SearchType of count for counting with queries etc.
type CountService struct {
	client  *Client
	indices []string // indices to restrict the count to; empty means all
	types   []string // document types to restrict the count to
	query   Query    // optional query; sent as the request body when set
	pretty  bool     // ask Elasticsearch to indent the JSON response
}

// CountResult is the result returned from using the Count API
// (http://www.elasticsearch.org/guide/reference/api/count/)
type CountResult struct {
	Count  int64      `json:"count"`
	Shards shardsInfo `json:"_shards,omitempty"`
}
+
+func NewCountService(client *Client) *CountService {
+	builder := &CountService{
+		client: client,
+	}
+	return builder
+}
+
+func (s *CountService) Index(index string) *CountService {
+	if s.indices == nil {
+		s.indices = make([]string, 0)
+	}
+	s.indices = append(s.indices, index)
+	return s
+}
+
+func (s *CountService) Indices(indices ...string) *CountService {
+	if s.indices == nil {
+		s.indices = make([]string, 0)
+	}
+	s.indices = append(s.indices, indices...)
+	return s
+}
+
+func (s *CountService) Type(typ string) *CountService {
+	if s.types == nil {
+		s.types = make([]string, 0)
+	}
+	s.types = append(s.types, typ)
+	return s
+}
+
+func (s *CountService) Types(types ...string) *CountService {
+	if s.types == nil {
+		s.types = make([]string, 0)
+	}
+	s.types = append(s.types, types...)
+	return s
+}
+
+func (s *CountService) Query(query Query) *CountService {
+	s.query = query
+	return s
+}
+
+func (s *CountService) Pretty(pretty bool) *CountService {
+	s.pretty = pretty
+	return s
+}
+
+func (s *CountService) Do() (int64, error) {
+	var err error
+
+	// Build url
+	path := "/"
+
+	// Indices part
+	indexPart := make([]string, 0)
+	for _, index := range s.indices {
+		index, err = uritemplates.Expand("{index}", map[string]string{
+			"index": index,
+		})
+		if err != nil {
+			return 0, err
+		}
+		indexPart = append(indexPart, index)
+	}
+	if len(indexPart) > 0 {
+		path += strings.Join(indexPart, ",")
+	}
+
+	// Types part
+	typesPart := make([]string, 0)
+	for _, typ := range s.types {
+		typ, err = uritemplates.Expand("{type}", map[string]string{
+			"type": typ,
+		})
+		if err != nil {
+			return 0, err
+		}
+		typesPart = append(typesPart, typ)
+	}
+	if len(typesPart) > 0 {
+		path += "/" + strings.Join(typesPart, ",")
+	}
+
+	// Search
+	path += "/_count"
+
+	// Parameters
+	params := make(url.Values)
+	if s.pretty {
+		params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+	}
+
+	// Set body if there is a query specified
+	var body interface{}
+	if s.query != nil {
+		query := make(map[string]interface{})
+		query["query"] = s.query.Source()
+		body = query
+	}
+
+	// Get response
+	res, err := s.client.PerformRequest("POST", path, params, body)
+	if err != nil {
+		return 0, err
+	}
+
+	// Return result
+	ret := new(CountResult)
+	if err := json.Unmarshal(res.Body, ret); err != nil {
+		return 0, err
+	}
+	if ret != nil {
+		return ret.Count, nil
+	}
+
+	return int64(0), nil
+}

+ 75 - 0
github.com/olivere/elastic/create_index.go

@@ -0,0 +1,75 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
// CreateIndexService creates a new index, optionally with settings and
// mappings supplied as a raw JSON body.
type CreateIndexService struct {
	client *Client
	index  string // name of the index to create
	body   string // optional JSON body with settings/mappings
	pretty bool   // ask Elasticsearch to indent the JSON response
}

// NewCreateIndexService creates a new CreateIndexService.
func NewCreateIndexService(client *Client) *CreateIndexService {
	builder := &CreateIndexService{
		client: client,
	}
	return builder
}

// Index sets the name of the index to create.
func (b *CreateIndexService) Index(index string) *CreateIndexService {
	b.index = index
	return b
}

// Body sets the JSON body of the create-index request (settings, mappings).
func (b *CreateIndexService) Body(body string) *CreateIndexService {
	b.body = body
	return b
}

// Pretty asks Elasticsearch to indent the JSON response.
func (b *CreateIndexService) Pretty(pretty bool) *CreateIndexService {
	b.pretty = pretty
	return b
}
+
+func (b *CreateIndexService) Do() (*CreateIndexResult, error) {
+	// Build url
+	path, err := uritemplates.Expand("/{index}/", map[string]string{
+		"index": b.index,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	params := make(url.Values)
+	if b.pretty {
+		params.Set("pretty", fmt.Sprintf("%v", b.pretty))
+	}
+
+	// Get response
+	res, err := b.client.PerformRequest("PUT", path, params, b.body)
+	if err != nil {
+		return nil, err
+	}
+
+	ret := new(CreateIndexResult)
+	if err := json.Unmarshal(res.Body, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
// -- Result of a create index request.

// CreateIndexResult is the response of CreateIndexService.Do.
type CreateIndexResult struct {
	Acknowledged bool `json:"acknowledged"` // whether the request was acknowledged
}

+ 26 - 0
github.com/olivere/elastic/decoder.go

@@ -0,0 +1,26 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+)
+
// Decoder is used to decode responses from Elasticsearch.
// Users of elastic can implement their own marshaler for advanced purposes
// and set them per Client (see SetDecoder). If none is specified,
// DefaultDecoder is used.
type Decoder interface {
	Decode(data []byte, v interface{}) error
}

// DefaultDecoder decodes JSON data with json.Unmarshal from the
// Go standard library.
type DefaultDecoder struct{}

// Decode unmarshals data into v via encoding/json.
func (d *DefaultDecoder) Decode(data []byte, v interface{}) error {
	return json.Unmarshal(data, v)
}

+ 118 - 0
github.com/olivere/elastic/delete.go

@@ -0,0 +1,118 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
// DeleteService deletes a single document by index, type and id.
type DeleteService struct {
	client  *Client
	index   string
	_type   string
	id      string
	routing string // routing value; also set via Parent when empty
	refresh *bool  // nil means "use the server default"
	version *int   // nil means "no version check"
	pretty  bool   // ask Elasticsearch to indent the JSON response
}
+
+func NewDeleteService(client *Client) *DeleteService {
+	builder := &DeleteService{
+		client: client,
+	}
+	return builder
+}
+
+func (s *DeleteService) Index(index string) *DeleteService {
+	s.index = index
+	return s
+}
+
+func (s *DeleteService) Type(_type string) *DeleteService {
+	s._type = _type
+	return s
+}
+
+func (s *DeleteService) Id(id string) *DeleteService {
+	s.id = id
+	return s
+}
+
+func (s *DeleteService) Parent(parent string) *DeleteService {
+	if s.routing == "" {
+		s.routing = parent
+	}
+	return s
+}
+
+func (s *DeleteService) Refresh(refresh bool) *DeleteService {
+	s.refresh = &refresh
+	return s
+}
+
+func (s *DeleteService) Version(version int) *DeleteService {
+	s.version = &version
+	return s
+}
+
+func (s *DeleteService) Pretty(pretty bool) *DeleteService {
+	s.pretty = pretty
+	return s
+}
+
+func (s *DeleteService) Do() (*DeleteResult, error) {
+	// Build url
+	path, err := uritemplates.Expand("/{index}/{type}/{id}", map[string]string{
+		"index": s.index,
+		"type":  s._type,
+		"id":    s.id,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	// Parameters
+	params := make(url.Values)
+	if s.refresh != nil {
+		params.Set("refresh", fmt.Sprintf("%v", *s.refresh))
+	}
+	if s.version != nil {
+		params.Set("version", fmt.Sprintf("%d", *s.version))
+	}
+	if s.routing != "" {
+		params.Set("routing", fmt.Sprintf("%s", s.routing))
+	}
+	if s.pretty {
+		params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+	}
+
+	// Get response
+	res, err := s.client.PerformRequest("DELETE", path, params, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return response
+	ret := new(DeleteResult)
+	if err := json.Unmarshal(res.Body, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
// -- Result of a delete request.

// DeleteResult is the response of DeleteService.Do.
type DeleteResult struct {
	Found   bool   `json:"found"` // true if the document existed and was deleted
	Index   string `json:"_index"`
	Type    string `json:"_type"`
	Id      string `json:"_id"`
	Version int64  `json:"_version"`
}

+ 292 - 0
github.com/olivere/elastic/delete_by_query.go

@@ -0,0 +1,292 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+	"strings"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
// DeleteByQueryService deletes documents that match a query.
// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/docs-delete-by-query.html.
type DeleteByQueryService struct {
	client            *Client
	indices           []string // indices to operate on; empty means all
	types             []string // document types to operate on
	analyzer          string   // analyzer for the query string
	consistency       string   // write consistency: one, quorum, or all
	defaultOper       string   // default operator for query string queries (AND/OR)
	df                string   // default field for query string queries
	ignoreUnavailable *bool    // nil means "use the server default"
	allowNoIndices    *bool    // nil means "use the server default"
	expandWildcards   string
	replication       string // replication type (sync or async)
	routing           string
	timeout           string // operation timeout, e.g. "1s"
	pretty            bool   // ask Elasticsearch to indent the JSON response
	q                 string // query in Lucene query string syntax
	query             Query  // programmatic query; sent as the request body
}
+
+// NewDeleteByQueryService creates a new DeleteByQueryService.
+// You typically use the client's DeleteByQuery to get a reference to
+// the service.
+func NewDeleteByQueryService(client *Client) *DeleteByQueryService {
+	builder := &DeleteByQueryService{
+		client: client,
+	}
+	return builder
+}
+
+// Index limits the delete-by-query to a single index.
+// You can use _all to perform the operation on all indices.
+func (s *DeleteByQueryService) Index(index string) *DeleteByQueryService {
+	if s.indices == nil {
+		s.indices = make([]string, 0)
+	}
+	s.indices = append(s.indices, index)
+	return s
+}
+
+// Indices sets the indices on which to perform the delete operation.
+func (s *DeleteByQueryService) Indices(indices ...string) *DeleteByQueryService {
+	if s.indices == nil {
+		s.indices = make([]string, 0)
+	}
+	s.indices = append(s.indices, indices...)
+	return s
+}
+
+// Type limits the delete operation to the given type.
+func (s *DeleteByQueryService) Type(typ string) *DeleteByQueryService {
+	if s.types == nil {
+		s.types = make([]string, 0)
+	}
+	s.types = append(s.types, typ)
+	return s
+}
+
+// Types limits the delete operation to the given types.
+func (s *DeleteByQueryService) Types(types ...string) *DeleteByQueryService {
+	if s.types == nil {
+		s.types = make([]string, 0)
+	}
+	s.types = append(s.types, types...)
+	return s
+}
+
// Analyzer sets the analyzer to use for the query string.
func (s *DeleteByQueryService) Analyzer(analyzer string) *DeleteByQueryService {
	s.analyzer = analyzer
	return s
}

// Consistency represents the specific write consistency setting for the
// operation. It can be "one", "quorum", or "all".
func (s *DeleteByQueryService) Consistency(consistency string) *DeleteByQueryService {
	s.consistency = consistency
	return s
}

// DefaultOperator sets the default operator for query string queries
// ("AND" or "OR").
func (s *DeleteByQueryService) DefaultOperator(defaultOperator string) *DeleteByQueryService {
	s.defaultOper = defaultOperator
	return s
}

// DF is the field to use as default where no field prefix is given in the query string.
func (s *DeleteByQueryService) DF(defaultField string) *DeleteByQueryService {
	s.df = defaultField
	return s
}

// DefaultField is the field to use as default where no field prefix is given
// in the query string. It is an alias of DF.
func (s *DeleteByQueryService) DefaultField(defaultField string) *DeleteByQueryService {
	s.df = defaultField
	return s
}

// IgnoreUnavailable indicates whether specified concrete indices should be
// ignored when unavailable (missing or closed).
func (s *DeleteByQueryService) IgnoreUnavailable(ignore bool) *DeleteByQueryService {
	s.ignoreUnavailable = &ignore
	return s
}

// AllowNoIndices indicates whether to ignore if a wildcard indices
// expression resolves into no concrete indices (including the _all string
// or when no indices have been specified).
func (s *DeleteByQueryService) AllowNoIndices(allow bool) *DeleteByQueryService {
	s.allowNoIndices = &allow
	return s
}

// ExpandWildcards indicates whether to expand wildcard expressions to
// concrete indices that are open, closed or both. It can be "open" or "closed".
func (s *DeleteByQueryService) ExpandWildcards(expand string) *DeleteByQueryService {
	s.expandWildcards = expand
	return s
}

// Replication sets a specific replication type ("sync" or "async").
func (s *DeleteByQueryService) Replication(replication string) *DeleteByQueryService {
	s.replication = replication
	return s
}

// Q specifies the query in Lucene query string syntax. You can also use
// Query to programmatically specify the query.
func (s *DeleteByQueryService) Q(query string) *DeleteByQueryService {
	s.q = query
	return s
}

// QueryString is an alias of Q. Notice that you can also use Query to
// programmatically set the query.
func (s *DeleteByQueryService) QueryString(query string) *DeleteByQueryService {
	s.q = query
	return s
}

// Routing sets a specific routing value.
func (s *DeleteByQueryService) Routing(routing string) *DeleteByQueryService {
	s.routing = routing
	return s
}

// Timeout sets an explicit operation timeout, e.g. "1s" or "10000ms".
func (s *DeleteByQueryService) Timeout(timeout string) *DeleteByQueryService {
	s.timeout = timeout
	return s
}

// Pretty indents the JSON output from Elasticsearch.
func (s *DeleteByQueryService) Pretty(pretty bool) *DeleteByQueryService {
	s.pretty = pretty
	return s
}

// Query sets the query programmatically; it is sent as the request body.
func (s *DeleteByQueryService) Query(query Query) *DeleteByQueryService {
	s.query = query
	return s
}
+
+// Do executes the delete-by-query operation.
+func (s *DeleteByQueryService) Do() (*DeleteByQueryResult, error) {
+	var err error
+
+	// Build url
+	path := "/"
+
+	// Indices part
+	indexPart := make([]string, 0)
+	for _, index := range s.indices {
+		index, err = uritemplates.Expand("{index}", map[string]string{
+			"index": index,
+		})
+		if err != nil {
+			return nil, err
+		}
+		indexPart = append(indexPart, index)
+	}
+	if len(indexPart) > 0 {
+		path += strings.Join(indexPart, ",")
+	}
+
+	// Types part
+	typesPart := make([]string, 0)
+	for _, typ := range s.types {
+		typ, err = uritemplates.Expand("{type}", map[string]string{
+			"type": typ,
+		})
+		if err != nil {
+			return nil, err
+		}
+		typesPart = append(typesPart, typ)
+	}
+	if len(typesPart) > 0 {
+		path += "/" + strings.Join(typesPart, ",")
+	}
+
+	// Search
+	path += "/_query"
+
+	// Parameters
+	params := make(url.Values)
+	if s.analyzer != "" {
+		params.Set("analyzer", s.analyzer)
+	}
+	if s.consistency != "" {
+		params.Set("consistency", s.consistency)
+	}
+	if s.defaultOper != "" {
+		params.Set("default_operator", s.defaultOper)
+	}
+	if s.df != "" {
+		params.Set("df", s.df)
+	}
+	if s.ignoreUnavailable != nil {
+		params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+	}
+	if s.allowNoIndices != nil {
+		params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+	}
+	if s.expandWildcards != "" {
+		params.Set("expand_wildcards", s.expandWildcards)
+	}
+	if s.replication != "" {
+		params.Set("replication", s.replication)
+	}
+	if s.routing != "" {
+		params.Set("routing", s.routing)
+	}
+	if s.timeout != "" {
+		params.Set("timeout", s.timeout)
+	}
+	if s.pretty {
+		params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+	}
+	if s.q != "" {
+		params.Set("q", s.q)
+	}
+
+	// Set body if there is a query set
+	var body interface{}
+	if s.query != nil {
+		query := make(map[string]interface{})
+		query["query"] = s.query.Source()
+		body = query
+	}
+
+	// Get response
+	res, err := s.client.PerformRequest("DELETE", path, params, body)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return result
+	ret := new(DeleteByQueryResult)
+	if err := json.Unmarshal(res.Body, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
// DeleteByQueryResult is the outcome of executing Do with
// DeleteByQueryService. The map is keyed by index name.
type DeleteByQueryResult struct {
	Indices map[string]IndexDeleteByQueryResult `json:"_indices"`
}

// IndexDeleteByQueryResult is the result of a delete-by-query for a specific
// index.
type IndexDeleteByQueryResult struct {
	Shards shardsInfo `json:"_shards"`
}

+ 57 - 0
github.com/olivere/elastic/delete_index.go

@@ -0,0 +1,57 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
// DeleteIndexService deletes an entire index.
type DeleteIndexService struct {
	client *Client
	index  string // name of the index to delete
}

// NewDeleteIndexService creates a new DeleteIndexService.
func NewDeleteIndexService(client *Client) *DeleteIndexService {
	builder := &DeleteIndexService{
		client: client,
	}
	return builder
}

// Index sets the name of the index to delete.
func (b *DeleteIndexService) Index(index string) *DeleteIndexService {
	b.index = index
	return b
}
+
+func (b *DeleteIndexService) Do() (*DeleteIndexResult, error) {
+	// Build url
+	path, err := uritemplates.Expand("/{index}/", map[string]string{
+		"index": b.index,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	// Get response
+	res, err := b.client.PerformRequest("DELETE", path, nil, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return result
+	ret := new(DeleteIndexResult)
+	if err := json.Unmarshal(res.Body, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
// -- Result of a delete index request.

// DeleteIndexResult is the response of DeleteIndexService.Do.
type DeleteIndexResult struct {
	Acknowledged bool `json:"acknowledged"` // whether the request was acknowledged
}

+ 136 - 0
github.com/olivere/elastic/delete_mapping.go

@@ -0,0 +1,136 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"log"
+	"net/url"
+	"strings"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
// Deliberate keep-alive references for imports that may otherwise be
// unused in this (likely generated) file; removing this block could
// make some of the imports above unused and break the build.
var (
	_ = fmt.Print
	_ = log.Print
	_ = strings.Index
	_ = uritemplates.Expand
	_ = url.Parse
)
+
// DeleteMappingService allows to delete a mapping along with its data.
// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-delete-mapping.html.
type DeleteMappingService struct {
	client        *Client
	pretty        bool     // ask Elasticsearch to indent the JSON response
	index         []string // index names (supports wildcards); `_all` for all
	typ           []string // document types (supports wildcards); `_all` for all
	masterTimeout string   // timeout for connecting to master
}
+
+// NewDeleteMappingService creates a new DeleteMappingService.
+func NewDeleteMappingService(client *Client) *DeleteMappingService {
+	return &DeleteMappingService{
+		client: client,
+		index:  make([]string, 0),
+		typ:    make([]string, 0),
+	}
+}
+
+// Index is a list of index names (supports wildcards). Use `_all` for all indices.
+func (s *DeleteMappingService) Index(index ...string) *DeleteMappingService {
+	s.index = append(s.index, index...)
+	return s
+}
+
+// Type is a list of document types to delete (supports wildcards).
+// Use `_all` to delete all document types in the specified indices..
+func (s *DeleteMappingService) Type(typ ...string) *DeleteMappingService {
+	s.typ = append(s.typ, typ...)
+	return s
+}
+
+// MasterTimeout specifies the timeout for connecting to master.
+func (s *DeleteMappingService) MasterTimeout(masterTimeout string) *DeleteMappingService {
+	s.masterTimeout = masterTimeout
+	return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *DeleteMappingService) Pretty(pretty bool) *DeleteMappingService {
+	s.pretty = pretty
+	return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *DeleteMappingService) buildURL() (string, url.Values, error) {
+	// Build URL
+	path, err := uritemplates.Expand("/{index}/_mapping/{type}", map[string]string{
+		"index": strings.Join(s.index, ","),
+		"type":  strings.Join(s.typ, ","),
+	})
+	if err != nil {
+		return "", url.Values{}, err
+	}
+
+	// Add query string parameters
+	params := url.Values{}
+	if s.pretty {
+		params.Set("pretty", "1")
+	}
+	if s.masterTimeout != "" {
+		params.Set("master_timeout", s.masterTimeout)
+	}
+	return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *DeleteMappingService) Validate() error {
+	var invalid []string
+	if len(s.index) == 0 {
+		invalid = append(invalid, "Index")
+	}
+	if len(s.typ) == 0 {
+		invalid = append(invalid, "Type")
+	}
+	if len(invalid) > 0 {
+		return fmt.Errorf("missing required fields: %v", invalid)
+	}
+	return nil
+}
+
+// Do executes the operation.
+func (s *DeleteMappingService) Do() (*DeleteMappingResponse, error) {
+	// Check pre-conditions
+	if err := s.Validate(); err != nil {
+		return nil, err
+	}
+
+	// Get URL for request
+	path, params, err := s.buildURL()
+	if err != nil {
+		return nil, err
+	}
+
+	// Get HTTP response
+	res, err := s.client.PerformRequest("DELETE", path, params, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return operation response
+	ret := new(DeleteMappingResponse)
+	if err := json.Unmarshal(res.Body, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
// DeleteMappingResponse is the response of DeleteMappingService.Do.
type DeleteMappingResponse struct {
	Acknowledged bool `json:"acknowledged"` // whether the request was acknowledged
}

+ 118 - 0
github.com/olivere/elastic/delete_template.go

@@ -0,0 +1,118 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
// DeleteTemplateService deletes a search template. More information can
// be found at http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/search-template.html.
type DeleteTemplateService struct {
	client      *Client
	pretty      bool   // ask Elasticsearch to indent the JSON response
	id          string // template ID (required)
	version     *int   // optional explicit version for concurrency control
	versionType string // optional version type
}

// NewDeleteTemplateService creates a new DeleteTemplateService.
func NewDeleteTemplateService(client *Client) *DeleteTemplateService {
	return &DeleteTemplateService{
		client: client,
	}
}
+
// Id sets the template ID (required).
func (s *DeleteTemplateService) Id(id string) *DeleteTemplateService {
	s.id = id
	return s
}

// Version sets an explicit version number for concurrency control.
func (s *DeleteTemplateService) Version(version int) *DeleteTemplateService {
	s.version = &version
	return s
}

// VersionType specifies a version type.
func (s *DeleteTemplateService) VersionType(versionType string) *DeleteTemplateService {
	s.versionType = versionType
	return s
}
+
+// buildURL builds the URL for the operation.
+func (s *DeleteTemplateService) buildURL() (string, url.Values, error) {
+	// Build URL
+	path, err := uritemplates.Expand("/_search/template/{id}", map[string]string{
+		"id": s.id,
+	})
+	if err != nil {
+		return "", url.Values{}, err
+	}
+
+	// Add query string parameters
+	params := url.Values{}
+	if s.version != nil {
+		params.Set("version", fmt.Sprintf("%d", *s.version))
+	}
+	if s.versionType != "" {
+		params.Set("version_type", s.versionType)
+	}
+
+	return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *DeleteTemplateService) Validate() error {
+	var invalid []string
+	if s.id == "" {
+		invalid = append(invalid, "Id")
+	}
+	if len(invalid) > 0 {
+		return fmt.Errorf("missing required fields: %v", invalid)
+	}
+	return nil
+}
+
+// Do executes the operation.
+func (s *DeleteTemplateService) Do() (*DeleteTemplateResponse, error) {
+	// Check pre-conditions
+	if err := s.Validate(); err != nil {
+		return nil, err
+	}
+
+	// Get URL for request
+	path, params, err := s.buildURL()
+	if err != nil {
+		return nil, err
+	}
+
+	// Get HTTP response
+	res, err := s.client.PerformRequest("DELETE", path, params, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return operation response
+	ret := new(DeleteTemplateResponse)
+	if err := json.Unmarshal(res.Body, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
// DeleteTemplateResponse is the response of DeleteTemplateService.Do.
type DeleteTemplateResponse struct {
	Found   bool   `json:"found"` // true if the template existed and was deleted
	Index   string `json:"_index"`
	Type    string `json:"_type"`
	Id      string `json:"_id"`
	Version int    `json:"_version"`
}

+ 51 - 0
github.com/olivere/elastic/doc.go

@@ -0,0 +1,51 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+/*
+Package elastic provides an interface to the Elasticsearch server
+(http://www.elasticsearch.org/).
+
+The first thing you do is to create a Client. If you have Elasticsearch
+installed and running with its default settings
+(i.e. available at http://127.0.0.1:9200), all you need to do is:
+
+	client, err := elastic.NewClient()
+	if err != nil {
+		// Handle error
+	}
+
+If your Elasticsearch server is running on a different IP and/or port,
+just provide a URL to NewClient:
+
+  // Create a client and connect to http://192.168.2.10:9201
+  client, err := elastic.NewClient(elastic.SetURL("http://192.168.2.10:9201"))
+  if err != nil {
+    // Handle error
+  }
+
+You can pass many more configuration parameters to NewClient. Review the
+documentation of NewClient for more information.
+
+If no Elasticsearch server is available, services will fail when creating
+a new request and will return ErrNoClient.
+
+A Client provides services. The services usually come with a variety of
+methods to prepare the query and a Do function to execute it against the
+Elasticsearch REST interface and return a response. Here is an example
+of the IndexExists service that checks if a given index already exists.
+
+	exists, err := client.IndexExists("twitter").Do()
+	if err != nil {
+		// Handle error
+	}
+	if !exists {
+		// Index does not exist yet.
+	}
+
+Look up the documentation for Client to get an idea of the services provided
+and what kinds of responses you get when executing the Do function of a service.
+Also see the wiki on Github for more details.
+
+*/
+package elastic

+ 48 - 0
github.com/olivere/elastic/errors.go

@@ -0,0 +1,48 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+)
+
+func checkResponse(res *http.Response) error {
+	// 200-299 and 404 are valid status codes
+	if (res.StatusCode >= 200 && res.StatusCode <= 299) || res.StatusCode == http.StatusNotFound {
+		return nil
+	}
+	if res.Body == nil {
+		return fmt.Errorf("elastic: Error %d (%s)", res.StatusCode, http.StatusText(res.StatusCode))
+	}
+	slurp, err := ioutil.ReadAll(res.Body)
+	if err != nil {
+		return fmt.Errorf("elastic: Error %d (%s) when reading body: %v", res.StatusCode, http.StatusText(res.StatusCode), err)
+	}
+	errReply := new(Error)
+	err = json.Unmarshal(slurp, errReply)
+	if err == nil && errReply != nil {
+		if errReply.Status == 0 {
+			errReply.Status = res.StatusCode
+		}
+		return errReply
+	}
+	return nil
+}
+
+type Error struct {
+	Status  int    `json:"status"`
+	Message string `json:"error"`
+}
+
+func (e *Error) Error() string {
+	if e.Message != "" {
+		return fmt.Sprintf("elastic: Error %d (%s): %s", e.Status, http.StatusText(e.Status), e.Message)
+	} else {
+		return fmt.Sprintf("elastic: Error %d (%s)", e.Status, http.StatusText(e.Status))
+	}
+}

+ 71 - 0
github.com/olivere/elastic/exists.go

@@ -0,0 +1,71 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"fmt"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
// ExistsService checks whether a document with the given index, type
// and id exists, using an HTTP HEAD request.
type ExistsService struct {
	client *Client
	index  string
	_type  string
	id     string
}

// NewExistsService creates a new ExistsService.
func NewExistsService(client *Client) *ExistsService {
	builder := &ExistsService{
		client: client,
	}
	return builder
}

// String returns a human-readable description of the request,
// e.g. "exists([twitter][tweet][1])".
func (s *ExistsService) String() string {
	return fmt.Sprintf("exists([%v][%v][%v])",
		s.index,
		s._type,
		s.id)
}

// Index sets the index of the document.
func (s *ExistsService) Index(index string) *ExistsService {
	s.index = index
	return s
}

// Type sets the type of the document.
func (s *ExistsService) Type(_type string) *ExistsService {
	s._type = _type
	return s
}

// Id sets the id of the document.
func (s *ExistsService) Id(id string) *ExistsService {
	s.id = id
	return s
}
+
+func (s *ExistsService) Do() (bool, error) {
+	// Build url
+	path, err := uritemplates.Expand("/{index}/{type}/{id}", map[string]string{
+		"index": s.index,
+		"type":  s._type,
+		"id":    s.id,
+	})
+	if err != nil {
+		return false, err
+	}
+
+	// Get response
+	res, err := s.client.PerformRequest("HEAD", path, nil, nil)
+	if err != nil {
+		return false, err
+	}
+	if res.StatusCode == 200 {
+		return true, nil
+	} else if res.StatusCode == 404 {
+		return false, nil
+	}
+	return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode)
+}

+ 329 - 0
github.com/olivere/elastic/explain.go

@@ -0,0 +1,329 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"log"
+	"net/url"
+	"strings"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
// Keep the imports referenced even while parts of this (generated)
// file do not use them; avoids "imported and not used" compile errors.
var (
	_ = fmt.Print
	_ = log.Print
	_ = strings.Index
	_ = uritemplates.Expand
	_ = url.Parse
)
+
// ExplainService computes a score explanation for a query and
// a specific document.
// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-explain.html.
type ExplainService struct {
	client                 *Client
	pretty                 bool
	id                     string // document id (required)
	index                  string // index name (required)
	typ                    string // document type (required)
	q                      string // Lucene query string
	routing                string
	lenient                *bool
	analyzer               string
	df                     string
	fields                 []string
	lowercaseExpandedTerms *bool
	xSourceInclude         []string // _source_include fields
	analyzeWildcard        *bool
	parent                 string
	preference             string
	xSource                []string // _source selection
	defaultOperator        string
	xSourceExclude         []string // _source_exclude fields
	source                 string
	bodyJson               interface{} // request body as JSON-serializable value
	bodyString             string      // request body as raw string (used when bodyJson is nil)
}

// NewExplainService creates a new ExplainService.
func NewExplainService(client *Client) *ExplainService {
	return &ExplainService{
		client:         client,
		xSource:        make([]string, 0),
		xSourceExclude: make([]string, 0),
		fields:         make([]string, 0),
		xSourceInclude: make([]string, 0),
	}
}
+
// Id is the document ID.
func (s *ExplainService) Id(id string) *ExplainService {
	s.id = id
	return s
}

// Index is the name of the index.
func (s *ExplainService) Index(index string) *ExplainService {
	s.index = index
	return s
}

// Type is the type of the document.
func (s *ExplainService) Type(typ string) *ExplainService {
	s.typ = typ
	return s
}

// Source is the URL-encoded query definition (instead of using the request body).
func (s *ExplainService) Source(source string) *ExplainService {
	s.source = source
	return s
}

// XSourceExclude is a list of fields to exclude from the returned _source field.
// Each call replaces any previously set list.
func (s *ExplainService) XSourceExclude(xSourceExclude ...string) *ExplainService {
	s.xSourceExclude = make([]string, 0)
	s.xSourceExclude = append(s.xSourceExclude, xSourceExclude...)
	return s
}

// Lenient specifies whether format-based query failures
// (such as providing text to a numeric field) should be ignored.
func (s *ExplainService) Lenient(lenient bool) *ExplainService {
	s.lenient = &lenient
	return s
}

// Q is the query in the Lucene query string syntax.
func (s *ExplainService) Q(q string) *ExplainService {
	s.q = q
	return s
}

// Routing sets a specific routing value.
func (s *ExplainService) Routing(routing string) *ExplainService {
	s.routing = routing
	return s
}

// AnalyzeWildcard specifies whether wildcards and prefix queries
// in the query string query should be analyzed (default: false).
func (s *ExplainService) AnalyzeWildcard(analyzeWildcard bool) *ExplainService {
	s.analyzeWildcard = &analyzeWildcard
	return s
}

// Analyzer is the analyzer for the query string query.
func (s *ExplainService) Analyzer(analyzer string) *ExplainService {
	s.analyzer = analyzer
	return s
}

// Df is the default field for query string query (default: _all).
func (s *ExplainService) Df(df string) *ExplainService {
	s.df = df
	return s
}

// Fields is a list of fields to return in the response.
// Each call replaces any previously set list.
func (s *ExplainService) Fields(fields ...string) *ExplainService {
	s.fields = make([]string, 0)
	s.fields = append(s.fields, fields...)
	return s
}

// LowercaseExpandedTerms specifies whether query terms should be lowercased.
func (s *ExplainService) LowercaseExpandedTerms(lowercaseExpandedTerms bool) *ExplainService {
	s.lowercaseExpandedTerms = &lowercaseExpandedTerms
	return s
}

// XSourceInclude is a list of fields to extract and return from the _source field.
// Each call replaces any previously set list.
func (s *ExplainService) XSourceInclude(xSourceInclude ...string) *ExplainService {
	s.xSourceInclude = make([]string, 0)
	s.xSourceInclude = append(s.xSourceInclude, xSourceInclude...)
	return s
}

// DefaultOperator is the default operator for query string query (AND or OR).
func (s *ExplainService) DefaultOperator(defaultOperator string) *ExplainService {
	s.defaultOperator = defaultOperator
	return s
}

// Parent is the ID of the parent document.
func (s *ExplainService) Parent(parent string) *ExplainService {
	s.parent = parent
	return s
}

// Preference specifies the node or shard the operation should be performed on (default: random).
func (s *ExplainService) Preference(preference string) *ExplainService {
	s.preference = preference
	return s
}

// XSource is true or false to return the _source field or not, or a list of fields to return.
// Each call replaces any previously set list.
func (s *ExplainService) XSource(xSource ...string) *ExplainService {
	s.xSource = make([]string, 0)
	s.xSource = append(s.xSource, xSource...)
	return s
}

// Pretty indicates that the JSON response be indented and human readable.
func (s *ExplainService) Pretty(pretty bool) *ExplainService {
	s.pretty = pretty
	return s
}

// Query sets a query definition using the Query DSL.
// It overwrites any body set via BodyJson.
func (s *ExplainService) Query(query Query) *ExplainService {
	body := make(map[string]interface{})
	body["query"] = query.Source()
	s.bodyJson = body
	return s
}

// BodyJson sets the query definition using the Query DSL.
func (s *ExplainService) BodyJson(body interface{}) *ExplainService {
	s.bodyJson = body
	return s
}

// BodyString sets the query definition using the Query DSL as a string.
// It is only used when no BodyJson has been set.
func (s *ExplainService) BodyString(body string) *ExplainService {
	s.bodyString = body
	return s
}
+
// buildURL builds the URL and query-string parameters for the operation.
func (s *ExplainService) buildURL() (string, url.Values, error) {
	// Build URL; uritemplates escapes each path component.
	path, err := uritemplates.Expand("/{index}/{type}/{id}/_explain", map[string]string{
		"id":    s.id,
		"index": s.index,
		"type":  s.typ,
	})
	if err != nil {
		return "", url.Values{}, err
	}

	// Add query string parameters; optional parameters are only
	// emitted when explicitly set (pointer fields non-nil, strings
	// non-empty, slices non-empty).
	params := url.Values{}
	if s.pretty {
		params.Set("pretty", "1")
	}
	if len(s.xSource) > 0 {
		params.Set("_source", strings.Join(s.xSource, ","))
	}
	if s.defaultOperator != "" {
		params.Set("default_operator", s.defaultOperator)
	}
	if s.parent != "" {
		params.Set("parent", s.parent)
	}
	if s.preference != "" {
		params.Set("preference", s.preference)
	}
	if s.source != "" {
		params.Set("source", s.source)
	}
	if len(s.xSourceExclude) > 0 {
		params.Set("_source_exclude", strings.Join(s.xSourceExclude, ","))
	}
	if s.lenient != nil {
		params.Set("lenient", fmt.Sprintf("%v", *s.lenient))
	}
	if s.q != "" {
		params.Set("q", s.q)
	}
	if s.routing != "" {
		params.Set("routing", s.routing)
	}
	if len(s.fields) > 0 {
		params.Set("fields", strings.Join(s.fields, ","))
	}
	if s.lowercaseExpandedTerms != nil {
		params.Set("lowercase_expanded_terms", fmt.Sprintf("%v", *s.lowercaseExpandedTerms))
	}
	if len(s.xSourceInclude) > 0 {
		params.Set("_source_include", strings.Join(s.xSourceInclude, ","))
	}
	if s.analyzeWildcard != nil {
		params.Set("analyze_wildcard", fmt.Sprintf("%v", *s.analyzeWildcard))
	}
	if s.analyzer != "" {
		params.Set("analyzer", s.analyzer)
	}
	if s.df != "" {
		params.Set("df", s.df)
	}
	return path, params, nil
}
+
+// Validate checks if the operation is valid.
+func (s *ExplainService) Validate() error {
+	var invalid []string
+	if s.index == "" {
+		invalid = append(invalid, "Index")
+	}
+	if s.typ == "" {
+		invalid = append(invalid, "Type")
+	}
+	if s.id == "" {
+		invalid = append(invalid, "Id")
+	}
+	if len(invalid) > 0 {
+		return fmt.Errorf("missing required fields: %v", invalid)
+	}
+	return nil
+}
+
// Do executes the explain operation and returns the parsed response.
func (s *ExplainService) Do() (*ExplainResponse, error) {
	// Check pre-conditions
	if err := s.Validate(); err != nil {
		return nil, err
	}

	// Get URL for request
	path, params, err := s.buildURL()
	if err != nil {
		return nil, err
	}

	// Setup HTTP request body: a JSON-serializable value takes
	// precedence over the raw string body.
	var body interface{}
	if s.bodyJson != nil {
		body = s.bodyJson
	} else {
		body = s.bodyString
	}

	// Get HTTP response
	res, err := s.client.PerformRequest("GET", path, params, body)
	if err != nil {
		return nil, err
	}

	// Return operation response
	ret := new(ExplainResponse)
	if err := json.Unmarshal(res.Body, ret); err != nil {
		return nil, err
	}
	return ret, nil
}
+
// ExplainResponse is the response of ExplainService.Do.
type ExplainResponse struct {
	Index       string                 `json:"_index"`
	Type        string                 `json:"_type"`
	Id          string                 `json:"_id"`
	Matched     bool                   `json:"matched"`     // whether the document matched the query
	Explanation map[string]interface{} `json:"explanation"` // raw scoring explanation tree
}

+ 74 - 0
github.com/olivere/elastic/fetch_source_context.go

@@ -0,0 +1,74 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"net/url"
+	"strings"
+)
+
// FetchSourceContext controls whether and how the _source field of a
// document is returned, optionally restricted by include/exclude
// field patterns.
type FetchSourceContext struct {
	fetchSource     bool
	transformSource bool
	includes        []string
	excludes        []string
}

// NewFetchSourceContext creates a context with source fetching
// switched on or off.
func NewFetchSourceContext(fetchSource bool) *FetchSourceContext {
	fsc := &FetchSourceContext{
		fetchSource: fetchSource,
		includes:    make([]string, 0),
		excludes:    make([]string, 0),
	}
	return fsc
}

// FetchSource reports whether the _source field will be returned.
func (fsc *FetchSourceContext) FetchSource() bool {
	return fsc.fetchSource
}

// SetFetchSource switches returning the _source field on or off.
func (fsc *FetchSourceContext) SetFetchSource(fetchSource bool) {
	fsc.fetchSource = fetchSource
}

// Include adds field patterns to return from _source.
func (fsc *FetchSourceContext) Include(includes ...string) *FetchSourceContext {
	for _, inc := range includes {
		fsc.includes = append(fsc.includes, inc)
	}
	return fsc
}

// Exclude adds field patterns to omit from _source.
func (fsc *FetchSourceContext) Exclude(excludes ...string) *FetchSourceContext {
	for _, exc := range excludes {
		fsc.excludes = append(fsc.excludes, exc)
	}
	return fsc
}

// TransformSource enables or disables source transformation.
func (fsc *FetchSourceContext) TransformSource(transformSource bool) *FetchSourceContext {
	fsc.transformSource = transformSource
	return fsc
}

// Source returns the value to serialize into the request body:
// false when source fetching is disabled, otherwise an object with
// the include/exclude patterns.
func (fsc *FetchSourceContext) Source() interface{} {
	if fsc.fetchSource {
		return map[string]interface{}{
			"includes": fsc.includes,
			"excludes": fsc.excludes,
		}
	}
	return false
}

// Query returns the parameters in a form suitable for a URL query string.
func (fsc *FetchSourceContext) Query() url.Values {
	params := make(url.Values)
	if !fsc.fetchSource {
		params.Add("_source", "false")
		return params
	}
	if len(fsc.includes) > 0 {
		params.Add("_source_include", strings.Join(fsc.includes, ","))
	}
	if len(fsc.excludes) > 0 {
		params.Add("_source_exclude", strings.Join(fsc.excludes, ","))
	}
	return params
}

+ 9 - 0
github.com/olivere/elastic/filter.go

@@ -0,0 +1,9 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
// Filter is implemented by all Elasticsearch filter types; Source
// returns the JSON-serializable body of the filter for the Query DSL.
type Filter interface {
	Source() interface{}
}

+ 167 - 0
github.com/olivere/elastic/flush.go

@@ -0,0 +1,167 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+	"strings"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
+// Flush allows to flush one or more indices. The flush process of an index
+// basically frees memory from the index by flushing data to the index
+// storage and clearing the internal transaction log.
+//
+// See http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-flush.html
+// for details.
// FlushService flushes one or more indices. The flush process of an
// index basically frees memory from the index by flushing data to the
// index storage and clearing the internal transaction log.
//
// See http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-flush.html
// for details.
type FlushService struct {
	client *Client

	indices           []string // indices to flush; empty means all
	force             *bool
	full              *bool
	waitIfOngoing     *bool
	ignoreUnavailable *bool
	allowNoIndices    *bool
	expandWildcards   string
}

// NewFlushService creates a new FlushService for the given client.
func NewFlushService(client *Client) *FlushService {
	builder := &FlushService{
		client: client,
	}
	return builder
}
+
// Index adds a single index to the list of indices to flush.
func (s *FlushService) Index(index string) *FlushService {
	if s.indices == nil {
		s.indices = make([]string, 0)
	}
	s.indices = append(s.indices, index)
	return s
}

// Indices adds several indices to the list of indices to flush.
func (s *FlushService) Indices(indices ...string) *FlushService {
	if s.indices == nil {
		s.indices = make([]string, 0)
	}
	s.indices = append(s.indices, indices...)
	return s
}

// Force specifies whether to force a flush even if it is not necessary.
func (s *FlushService) Force(force bool) *FlushService {
	s.force = &force
	return s
}

// Full, when set to true, creates a new index writer for the index and
// refreshes all settings related to the index.
func (s *FlushService) Full(full bool) *FlushService {
	s.full = &full
	return s
}

// WaitIfOngoing will block until the flush can be executed (if set to true)
// if another flush operation is already executing. The default is false
// and will cause an exception to be thrown on the shard level if another
// flush operation is already running. [1.4.0.Beta1]
func (s *FlushService) WaitIfOngoing(wait bool) *FlushService {
	s.waitIfOngoing = &wait
	return s
}

// IgnoreUnavailable specifies whether concrete indices should be ignored
// when unavailable (e.g. missing or closed).
func (s *FlushService) IgnoreUnavailable(ignoreUnavailable bool) *FlushService {
	s.ignoreUnavailable = &ignoreUnavailable
	return s
}

// AllowNoIndices specifies whether to ignore if a wildcard expression
// yields no indices. This includes the _all index or when no indices
// have been specified.
func (s *FlushService) AllowNoIndices(allowNoIndices bool) *FlushService {
	s.allowNoIndices = &allowNoIndices
	return s
}

// ExpandWildcards specifies whether to expand wildcards to concrete indices
// that are open, closed, or both. Use one of "open", "closed", "none", or "all".
func (s *FlushService) ExpandWildcards(expandWildcards string) *FlushService {
	s.expandWildcards = expandWildcards
	return s
}
+
+// Do executes the service.
+func (s *FlushService) Do() (*FlushResult, error) {
+	// Build url
+	path := "/"
+
+	// Indices part
+	if len(s.indices) > 0 {
+		indexPart := make([]string, 0)
+		for _, index := range s.indices {
+			index, err := uritemplates.Expand("{index}", map[string]string{
+				"index": index,
+			})
+			if err != nil {
+				return nil, err
+			}
+			indexPart = append(indexPart, index)
+		}
+		path += strings.Join(indexPart, ",") + "/"
+	}
+	path += "_flush"
+
+	// Parameters
+	params := make(url.Values)
+	if s.force != nil {
+		params.Set("force", fmt.Sprintf("%v", *s.force))
+	}
+	if s.full != nil {
+		params.Set("full", fmt.Sprintf("%v", *s.full))
+	}
+	if s.waitIfOngoing != nil {
+		params.Set("wait_if_ongoing", fmt.Sprintf("%v", *s.waitIfOngoing))
+	}
+	if s.ignoreUnavailable != nil {
+		params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+	}
+	if s.allowNoIndices != nil {
+		params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+	}
+	if s.expandWildcards != "" {
+		params.Set("expand_wildcards", s.expandWildcards)
+	}
+
+	// Get response
+	res, err := s.client.PerformRequest("POST", path, params, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return result
+	ret := new(FlushResult)
+	if err := json.Unmarshal(res.Body, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
// -- Result of a flush request.

// shardsInfo summarizes how many shards the operation covered.
type shardsInfo struct {
	Total      int `json:"total"`
	Successful int `json:"successful"`
	Failed     int `json:"failed"`
}

// FlushResult is the response of FlushService.Do.
type FlushResult struct {
	Shards shardsInfo `json:"_shards"`
}

+ 47 - 0
github.com/olivere/elastic/geo_point.go

@@ -0,0 +1,47 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+// GeoPoint is a geographic position described via latitude and longitude.
+type GeoPoint struct {
+	Lat, Lon float64
+}
+
+// Source returns the object to be serialized in Elasticsearch DSL.
+func (pt *GeoPoint) Source() map[string]float64 {
+	return map[string]float64{
+		"lat": pt.Lat,
+		"lon": pt.Lon,
+	}
+}
+
+// GeoPointFromLatLon initializes a new GeoPoint by latitude and longitude.
+func GeoPointFromLatLon(lat, lon float64) *GeoPoint {
+	return &GeoPoint{Lat: lat, Lon: lon}
+}
+
+// GeoPointFromString initializes a new GeoPoint by a string that is
+// formatted as "{latitude},{longitude}", e.g. "40.10210,-70.12091".
+func GeoPointFromString(latLon string) (*GeoPoint, error) {
+	latlon := strings.SplitN(latLon, ",", 2)
+	if len(latlon) != 2 {
+		return nil, fmt.Errorf("elastic: %s is not a valid geo point string", latLon)
+	}
+	lat, err := strconv.ParseFloat(latlon[0], 64)
+	if err != nil {
+		return nil, err
+	}
+	lon, err := strconv.ParseFloat(latlon[1], 64)
+	if err != nil {
+		return nil, err
+	}
+	return &GeoPoint{Lat: lat, Lon: lon}, nil
+}

+ 223 - 0
github.com/olivere/elastic/get.go

@@ -0,0 +1,223 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+	"strings"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
// GetService retrieves a single document by index, type, and id.
type GetService struct {
	client                        *Client
	index                         string // index name (required)
	typ                           string // document type; defaults to "_all"
	id                            string // document id (required)
	routing                       string
	preference                    string
	fields                        []string
	refresh                       *bool
	realtime                      *bool
	fsc                           *FetchSourceContext // optional _source filtering
	versionType                   string
	version                       *int64
	ignoreErrorsOnGeneratedFields *bool
}

// NewGetService creates a new GetService for the given client.
func NewGetService(client *Client) *GetService {
	builder := &GetService{
		client: client,
		typ:    "_all",
	}
	return builder
}
+
// String returns a short textual representation for debugging.
func (b *GetService) String() string {
	return fmt.Sprintf("[%v][%v][%v]: routing [%v]",
		b.index,
		b.typ,
		b.id,
		b.routing)
}

// Index sets the name of the index.
func (b *GetService) Index(index string) *GetService {
	b.index = index
	return b
}

// Type sets the document type.
func (b *GetService) Type(typ string) *GetService {
	b.typ = typ
	return b
}

// Id sets the document id.
func (b *GetService) Id(id string) *GetService {
	b.id = id
	return b
}

// Parent sets routing from the parent id, unless an explicit routing
// value was already set.
func (b *GetService) Parent(parent string) *GetService {
	if b.routing == "" {
		b.routing = parent
	}
	return b
}

// Routing sets a specific routing value.
func (b *GetService) Routing(routing string) *GetService {
	b.routing = routing
	return b
}

// Preference specifies the node or shard the operation should be performed on.
func (b *GetService) Preference(preference string) *GetService {
	b.preference = preference
	return b
}

// Fields adds to the list of fields to return in the response.
func (b *GetService) Fields(fields ...string) *GetService {
	if b.fields == nil {
		b.fields = make([]string, 0)
	}
	b.fields = append(b.fields, fields...)
	return b
}

// FetchSource enables or disables returning the _source field,
// creating a FetchSourceContext on demand.
func (s *GetService) FetchSource(fetchSource bool) *GetService {
	if s.fsc == nil {
		s.fsc = NewFetchSourceContext(fetchSource)
	} else {
		s.fsc.SetFetchSource(fetchSource)
	}
	return s
}

// FetchSourceContext sets the full _source filtering context.
func (s *GetService) FetchSourceContext(fetchSourceContext *FetchSourceContext) *GetService {
	s.fsc = fetchSourceContext
	return s
}

// Refresh indicates whether to refresh the shard before the get operation.
func (b *GetService) Refresh(refresh bool) *GetService {
	b.refresh = &refresh
	return b
}

// Realtime indicates whether to perform the get in realtime mode.
func (b *GetService) Realtime(realtime bool) *GetService {
	b.realtime = &realtime
	return b
}

// VersionType sets a specific version type.
func (b *GetService) VersionType(versionType string) *GetService {
	b.versionType = versionType
	return b
}

// Version sets an explicit version number for concurrency control.
func (b *GetService) Version(version int64) *GetService {
	b.version = &version
	return b
}

// IgnoreErrorsOnGeneratedFields indicates whether to ignore fields
// that are generated if the transaction log is accessed.
func (b *GetService) IgnoreErrorsOnGeneratedFields(ignore bool) *GetService {
	b.ignoreErrorsOnGeneratedFields = &ignore
	return b
}
+
// Validate checks if the operation is valid, i.e. that the required
// Id, Index, and Type fields have been set.
func (s *GetService) Validate() error {
	var invalid []string
	if s.id == "" {
		invalid = append(invalid, "Id")
	}
	if s.index == "" {
		invalid = append(invalid, "Index")
	}
	if s.typ == "" {
		invalid = append(invalid, "Type")
	}
	if len(invalid) > 0 {
		return fmt.Errorf("missing required fields: %v", invalid)
	}
	return nil
}
+
+func (b *GetService) Do() (*GetResult, error) {
+	// Check pre-conditions
+	if err := b.Validate(); err != nil {
+		return nil, err
+	}
+
+	// Build url
+	path, err := uritemplates.Expand("/{index}/{type}/{id}", map[string]string{
+		"index": b.index,
+		"type":  b.typ,
+		"id":    b.id,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	params := make(url.Values)
+	if b.realtime != nil {
+		params.Add("realtime", fmt.Sprintf("%v", *b.realtime))
+	}
+	if len(b.fields) > 0 {
+		params.Add("fields", strings.Join(b.fields, ","))
+	}
+	if b.routing != "" {
+		params.Add("routing", b.routing)
+	}
+	if b.preference != "" {
+		params.Add("preference", b.preference)
+	}
+	if b.refresh != nil {
+		params.Add("refresh", fmt.Sprintf("%v", *b.refresh))
+	}
+	if b.realtime != nil {
+		params.Add("realtime", fmt.Sprintf("%v", *b.realtime))
+	}
+	if b.ignoreErrorsOnGeneratedFields != nil {
+		params.Add("ignore_errors_on_generated_fields", fmt.Sprintf("%v", *b.ignoreErrorsOnGeneratedFields))
+	}
+	if len(b.fields) > 0 {
+		params.Add("_fields", strings.Join(b.fields, ","))
+	}
+	if b.version != nil {
+		params.Add("version", fmt.Sprintf("%d", *b.version))
+	}
+	if b.versionType != "" {
+		params.Add("version_type", b.versionType)
+	}
+	if b.fsc != nil {
+		for k, values := range b.fsc.Query() {
+			params.Add(k, strings.Join(values, ","))
+		}
+	}
+
+	// Get response
+	res, err := b.client.PerformRequest("GET", path, params, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return result
+	ret := new(GetResult)
+	if err := json.Unmarshal(res.Body, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
// -- Result of a get request.

// GetResult is the response of GetService.Do.
type GetResult struct {
	Index   string           `json:"_index"`
	Type    string           `json:"_type"`
	Id      string           `json:"_id"`
	Version int64            `json:"_version,omitempty"`
	Source  *json.RawMessage `json:"_source,omitempty"` // raw document body; decode lazily
	Found   bool             `json:"found,omitempty"`
	Fields  []string         `json:"fields,omitempty"`
	Error   string           `json:"error,omitempty"` // used only in MultiGet
}

+ 172 - 0
github.com/olivere/elastic/get_mapping.go

@@ -0,0 +1,172 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"log"
+	"net/url"
+	"strings"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
// Keep the imports referenced even while parts of this (generated)
// file do not use them; avoids "imported and not used" compile errors.
var (
	_ = fmt.Print
	_ = log.Print
	_ = strings.Index
	_ = uritemplates.Expand
	_ = url.Parse
)
+
// GetMappingService retrieves the mapping definitions for an index or
// index/type. See at http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-get-mapping.html.
type GetMappingService struct {
	client            *Client
	pretty            bool
	index             []string // index names; defaults to "_all"
	typ               []string // document types; defaults to "_all"
	local             *bool
	ignoreUnavailable *bool
	allowNoIndices    *bool
	expandWildcards   string
}

// NewGetMappingService creates a new GetMappingService.
func NewGetMappingService(client *Client) *GetMappingService {
	return &GetMappingService{
		client: client,
		index:  make([]string, 0),
		typ:    make([]string, 0),
	}
}
+
// Index adds to the list of index names.
func (s *GetMappingService) Index(index ...string) *GetMappingService {
	s.index = append(s.index, index...)
	return s
}

// Type adds to the list of document types.
func (s *GetMappingService) Type(typ ...string) *GetMappingService {
	s.typ = append(s.typ, typ...)
	return s
}

// AllowNoIndices indicates whether to ignore if a wildcard indices
// expression resolves into no concrete indices.
// This includes `_all` string or when no indices have been specified.
func (s *GetMappingService) AllowNoIndices(allowNoIndices bool) *GetMappingService {
	s.allowNoIndices = &allowNoIndices
	return s
}

// ExpandWildcards indicates whether to expand wildcard expression to
// concrete indices that are open, closed or both..
func (s *GetMappingService) ExpandWildcards(expandWildcards string) *GetMappingService {
	s.expandWildcards = expandWildcards
	return s
}

// Local indicates whether to return local information, do not retrieve
// the state from master node (default: false).
func (s *GetMappingService) Local(local bool) *GetMappingService {
	s.local = &local
	return s
}

// IgnoreUnavailable indicates whether specified concrete indices should be
// ignored when unavailable (missing or closed).
func (s *GetMappingService) IgnoreUnavailable(ignoreUnavailable bool) *GetMappingService {
	s.ignoreUnavailable = &ignoreUnavailable
	return s
}

// Pretty indicates that the JSON response be indented and human readable.
func (s *GetMappingService) Pretty(pretty bool) *GetMappingService {
	s.pretty = pretty
	return s
}
+
// buildURL builds the URL for the operation. Unset index and type
// lists default to "_all".
func (s *GetMappingService) buildURL() (string, url.Values, error) {
	var index, typ []string

	if len(s.index) > 0 {
		index = s.index
	} else {
		index = []string{"_all"}
	}

	if len(s.typ) > 0 {
		typ = s.typ
	} else {
		typ = []string{"_all"}
	}

	// Build URL
	path, err := uritemplates.Expand("/{index}/_mapping/{type}", map[string]string{
		"index": strings.Join(index, ","),
		"type":  strings.Join(typ, ","),
	})
	if err != nil {
		return "", url.Values{}, err
	}

	// Add query string parameters
	params := url.Values{}
	if s.pretty {
		params.Set("pretty", "1")
	}
	if s.ignoreUnavailable != nil {
		params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
	}
	if s.allowNoIndices != nil {
		params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
	}
	if s.expandWildcards != "" {
		params.Set("expand_wildcards", s.expandWildcards)
	}
	if s.local != nil {
		params.Set("local", fmt.Sprintf("%v", *s.local))
	}
	return path, params, nil
}

// Validate checks if the operation is valid. There are no required
// fields, so it always returns nil.
func (s *GetMappingService) Validate() error {
	return nil
}

// Do executes the operation. When successful, it returns a json.RawMessage.
// If you specify an index, Elasticsearch returns HTTP status 404.
// if you specify a type that does not exist, Elasticsearch returns
// an empty map.
func (s *GetMappingService) Do() (map[string]interface{}, error) {
	// Check pre-conditions
	if err := s.Validate(); err != nil {
		return nil, err
	}

	// Get URL for request
	path, params, err := s.buildURL()
	if err != nil {
		return nil, err
	}

	// Get HTTP response
	res, err := s.client.PerformRequest("GET", path, params, nil)
	if err != nil {
		return nil, err
	}

	// Return operation response
	var ret map[string]interface{}
	if err := json.Unmarshal(res.Body, &ret); err != nil {
		return nil, err
	}
	return ret, nil
}

+ 113 - 0
github.com/olivere/elastic/get_template.go

@@ -0,0 +1,113 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
// GetTemplateService reads a search template.
// It is documented at http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/search-template.html.
type GetTemplateService struct {
	client      *Client
	pretty      bool
	id          string      // template id (required)
	version     interface{} // explicit version for concurrency control
	versionType string
}

// NewGetTemplateService creates a new GetTemplateService.
func NewGetTemplateService(client *Client) *GetTemplateService {
	return &GetTemplateService{
		client: client,
	}
}
+
// Id is the template ID.
func (s *GetTemplateService) Id(id string) *GetTemplateService {
	s.id = id
	return s
}

// Version is an explicit version number for concurrency control.
func (s *GetTemplateService) Version(version interface{}) *GetTemplateService {
	s.version = version
	return s
}

// VersionType is a specific version type.
func (s *GetTemplateService) VersionType(versionType string) *GetTemplateService {
	s.versionType = versionType
	return s
}
+
// buildURL builds the URL and query-string parameters for the operation.
func (s *GetTemplateService) buildURL() (string, url.Values, error) {
	// Build URL; uritemplates escapes the id component.
	path, err := uritemplates.Expand("/_search/template/{id}", map[string]string{
		"id": s.id,
	})
	if err != nil {
		return "", url.Values{}, err
	}

	// Add query string parameters
	params := url.Values{}
	if s.version != nil {
		params.Set("version", fmt.Sprintf("%v", s.version))
	}
	if s.versionType != "" {
		params.Set("version_type", s.versionType)
	}

	return path, params, nil
}

// Validate checks if the operation is valid, i.e. that Id has been set.
func (s *GetTemplateService) Validate() error {
	var invalid []string
	if s.id == "" {
		invalid = append(invalid, "Id")
	}
	if len(invalid) > 0 {
		return fmt.Errorf("missing required fields: %v", invalid)
	}
	return nil
}
+
// Do executes the operation and returns the template.
func (s *GetTemplateService) Do() (*GetTemplateResponse, error) {
	// Check pre-conditions
	if err := s.Validate(); err != nil {
		return nil, err
	}

	// Get URL for request
	path, params, err := s.buildURL()
	if err != nil {
		return nil, err
	}

	// Get HTTP response
	res, err := s.client.PerformRequest("GET", path, params, nil)
	if err != nil {
		return nil, err
	}

	// Return result
	ret := new(GetTemplateResponse)
	if err := json.Unmarshal(res.Body, ret); err != nil {
		return nil, err
	}
	return ret, nil
}

// GetTemplateResponse is the response of GetTemplateService.Do.
type GetTemplateResponse struct {
	Template string `json:"template"`
}

+ 496 - 0
github.com/olivere/elastic/highlight.go

@@ -0,0 +1,496 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// Highlight allows highlighting search results on one or more fields.
+// For details, see:
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-highlighting.html
// Highlight allows highlighting search results on one or more fields.
// For details, see:
// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-highlighting.html
//
// Pointer fields use nil to mean "not set"; only set values are
// serialized by Source.
type Highlight struct {
	fields                []*HighlighterField
	tagsSchema            *string
	highlightFilter       *bool
	fragmentSize          *int
	numOfFragments        *int
	preTags               []string // tags inserted before highlighted terms
	postTags              []string // tags inserted after highlighted terms
	order                 *string
	encoder               *string
	requireFieldMatch     *bool
	boundaryMaxScan       *int
	boundaryChars         []rune
	highlighterType       *string // serialized as "type"
	fragmenter            *string
	highlightQuery        Query
	noMatchSize           *int
	phraseLimit           *int // NOTE(review): no setter on Highlight in this file; only serialized if non-nil
	options               map[string]interface{}
	forceSource           *bool
	useExplicitFieldOrder bool // serialize fields as an array to preserve order
}
+
+func NewHighlight() *Highlight {
+	hl := &Highlight{
+		fields:        make([]*HighlighterField, 0),
+		preTags:       make([]string, 0),
+		postTags:      make([]string, 0),
+		boundaryChars: make([]rune, 0),
+		options:       make(map[string]interface{}),
+	}
+	return hl
+}
+
+func (hl *Highlight) Fields(fields ...*HighlighterField) *Highlight {
+	hl.fields = append(hl.fields, fields...)
+	return hl
+}
+
+func (hl *Highlight) Field(name string) *Highlight {
+	field := NewHighlighterField(name)
+	hl.fields = append(hl.fields, field)
+	return hl
+}
+
+func (hl *Highlight) TagsSchema(schemaName string) *Highlight {
+	hl.tagsSchema = &schemaName
+	return hl
+}
+
+func (hl *Highlight) HighlightFilter(highlightFilter bool) *Highlight {
+	hl.highlightFilter = &highlightFilter
+	return hl
+}
+
+func (hl *Highlight) FragmentSize(fragmentSize int) *Highlight {
+	hl.fragmentSize = &fragmentSize
+	return hl
+}
+
+func (hl *Highlight) NumOfFragments(numOfFragments int) *Highlight {
+	hl.numOfFragments = &numOfFragments
+	return hl
+}
+
+func (hl *Highlight) Encoder(encoder string) *Highlight {
+	hl.encoder = &encoder
+	return hl
+}
+
+func (hl *Highlight) PreTags(preTags ...string) *Highlight {
+	hl.preTags = make([]string, 0)
+	hl.preTags = append(hl.preTags, preTags...)
+	return hl
+}
+
+func (hl *Highlight) PostTags(postTags ...string) *Highlight {
+	hl.postTags = make([]string, 0)
+	hl.postTags = append(hl.postTags, postTags...)
+	return hl
+}
+
+func (hl *Highlight) Order(order string) *Highlight {
+	hl.order = &order
+	return hl
+}
+
+func (hl *Highlight) RequireFieldMatch(requireFieldMatch bool) *Highlight {
+	hl.requireFieldMatch = &requireFieldMatch
+	return hl
+}
+
+func (hl *Highlight) BoundaryMaxScan(boundaryMaxScan int) *Highlight {
+	hl.boundaryMaxScan = &boundaryMaxScan
+	return hl
+}
+
+func (hl *Highlight) BoundaryChars(boundaryChars ...rune) *Highlight {
+	hl.boundaryChars = make([]rune, 0)
+	hl.boundaryChars = append(hl.boundaryChars, boundaryChars...)
+	return hl
+}
+
+func (hl *Highlight) HighlighterType(highlighterType string) *Highlight {
+	hl.highlighterType = &highlighterType
+	return hl
+}
+
+func (hl *Highlight) Fragmenter(fragmenter string) *Highlight {
+	hl.fragmenter = &fragmenter
+	return hl
+}
+
+func (hl *Highlight) HighlighQuery(highlightQuery Query) *Highlight {
+	hl.highlightQuery = highlightQuery
+	return hl
+}
+
+func (hl *Highlight) NoMatchSize(noMatchSize int) *Highlight {
+	hl.noMatchSize = &noMatchSize
+	return hl
+}
+
+func (hl *Highlight) Options(options map[string]interface{}) *Highlight {
+	hl.options = options
+	return hl
+}
+
+func (hl *Highlight) ForceSource(forceSource bool) *Highlight {
+	hl.forceSource = &forceSource
+	return hl
+}
+
+func (hl *Highlight) UseExplicitFieldOrder(useExplicitFieldOrder bool) *Highlight {
+	hl.useExplicitFieldOrder = useExplicitFieldOrder
+	return hl
+}
+
+// Creates the query source for the bool query.
+func (hl *Highlight) Source() interface{} {
+	// Returns the map inside of "highlight":
+	// "highlight":{
+	//   ... this ...
+	// }
+	source := make(map[string]interface{})
+	if hl.tagsSchema != nil {
+		source["tags_schema"] = *hl.tagsSchema
+	}
+	if hl.preTags != nil && len(hl.preTags) > 0 {
+		source["pre_tags"] = hl.preTags
+	}
+	if hl.postTags != nil && len(hl.postTags) > 0 {
+		source["post_tags"] = hl.postTags
+	}
+	if hl.order != nil {
+		source["order"] = *hl.order
+	}
+	if hl.highlightFilter != nil {
+		source["highlight_filter"] = *hl.highlightFilter
+	}
+	if hl.fragmentSize != nil {
+		source["fragment_size"] = *hl.fragmentSize
+	}
+	if hl.numOfFragments != nil {
+		source["number_of_fragments"] = *hl.numOfFragments
+	}
+	if hl.encoder != nil {
+		source["encoder"] = *hl.encoder
+	}
+	if hl.requireFieldMatch != nil {
+		source["require_field_match"] = *hl.requireFieldMatch
+	}
+	if hl.boundaryMaxScan != nil {
+		source["boundary_max_scan"] = *hl.boundaryMaxScan
+	}
+	if hl.boundaryChars != nil && len(hl.boundaryChars) > 0 {
+		source["boundary_chars"] = hl.boundaryChars
+	}
+	if hl.highlighterType != nil {
+		source["type"] = *hl.highlighterType
+	}
+	if hl.fragmenter != nil {
+		source["fragmenter"] = *hl.fragmenter
+	}
+	if hl.highlightQuery != nil {
+		source["highlight_query"] = hl.highlightQuery.Source()
+	}
+	if hl.noMatchSize != nil {
+		source["no_match_size"] = *hl.noMatchSize
+	}
+	if hl.phraseLimit != nil {
+		source["phrase_limit"] = *hl.phraseLimit
+	}
+	if hl.options != nil && len(hl.options) > 0 {
+		source["options"] = hl.options
+	}
+	if hl.forceSource != nil {
+		source["force_source"] = *hl.forceSource
+	}
+
+	if hl.fields != nil && len(hl.fields) > 0 {
+		if hl.useExplicitFieldOrder {
+			// Use a slice for the fields
+			fields := make([]map[string]interface{}, 0)
+			for _, field := range hl.fields {
+				fmap := make(map[string]interface{})
+				fmap[field.Name] = field.Source()
+				fields = append(fields, fmap)
+			}
+			source["fields"] = fields
+		} else {
+			// Use a map for the fields
+			fields := make(map[string]interface{}, 0)
+			for _, field := range hl.fields {
+				fields[field.Name] = field.Source()
+			}
+			source["fields"] = fields
+		}
+	}
+
+	return source
+
+	/*
+		highlightS := make(map[string]interface{})
+
+		if hl.tagsSchema != "" {
+			highlightS["tags_schema"] = hl.tagsSchema
+		}
+		if len(hl.preTags) > 0 {
+			highlightS["pre_tags"] = hl.preTags
+		}
+		if len(hl.postTags) > 0 {
+			highlightS["post_tags"] = hl.postTags
+		}
+		if hl.order != "" {
+			highlightS["order"] = hl.order
+		}
+		if hl.encoder != "" {
+			highlightS["encoder"] = hl.encoder
+		}
+		if hl.requireFieldMatch != nil {
+			highlightS["require_field_match"] = *hl.requireFieldMatch
+		}
+		if hl.highlighterType != "" {
+			highlightS["type"] = hl.highlighterType
+		}
+		if hl.fragmenter != "" {
+			highlightS["fragmenter"] = hl.fragmenter
+		}
+		if hl.highlightQuery != nil {
+			highlightS["highlight_query"] = hl.highlightQuery.Source()
+		}
+		if hl.noMatchSize != nil {
+			highlightS["no_match_size"] = *hl.noMatchSize
+		}
+		if len(hl.options) > 0 {
+			highlightS["options"] = hl.options
+		}
+		if hl.forceSource != nil {
+			highlightS["force_source"] = *hl.forceSource
+		}
+		if len(hl.fields) > 0 {
+			fieldsS := make(map[string]interface{})
+			for _, field := range hl.fields {
+				fieldsS[field.Name] = field.Source()
+			}
+			highlightS["fields"] = fieldsS
+		}
+
+		return highlightS
+	*/
+}
+
+// HighlighterField specifies a highlighted field.
+type HighlighterField struct {
+	Name string
+
+	preTags           []string
+	postTags          []string
+	fragmentSize      int
+	fragmentOffset    int
+	numOfFragments    int
+	highlightFilter   *bool
+	order             *string
+	requireFieldMatch *bool
+	boundaryMaxScan   int
+	boundaryChars     []rune
+	highlighterType   *string
+	fragmenter        *string
+	highlightQuery    Query
+	noMatchSize       *int
+	matchedFields     []string
+	phraseLimit       *int
+	options           map[string]interface{}
+	forceSource       *bool
+
+	/*
+		Name              string
+		preTags           []string
+		postTags          []string
+		fragmentSize      int
+		numOfFragments    int
+		fragmentOffset    int
+		highlightFilter   *bool
+		order             string
+		requireFieldMatch *bool
+		boundaryMaxScan   int
+		boundaryChars     []rune
+		highlighterType   string
+		fragmenter        string
+		highlightQuery    Query
+		noMatchSize       *int
+		matchedFields     []string
+		options           map[string]interface{}
+		forceSource       *bool
+	*/
+}
+
+func NewHighlighterField(name string) *HighlighterField {
+	return &HighlighterField{
+		Name:            name,
+		preTags:         make([]string, 0),
+		postTags:        make([]string, 0),
+		fragmentSize:    -1,
+		fragmentOffset:  -1,
+		numOfFragments:  -1,
+		boundaryMaxScan: -1,
+		boundaryChars:   make([]rune, 0),
+		matchedFields:   make([]string, 0),
+		options:         make(map[string]interface{}),
+	}
+}
+
+func (f *HighlighterField) PreTags(preTags ...string) *HighlighterField {
+	f.preTags = make([]string, 0)
+	f.preTags = append(f.preTags, preTags...)
+	return f
+}
+
+func (f *HighlighterField) PostTags(postTags ...string) *HighlighterField {
+	f.postTags = make([]string, 0)
+	f.postTags = append(f.postTags, postTags...)
+	return f
+}
+
+func (f *HighlighterField) FragmentSize(fragmentSize int) *HighlighterField {
+	f.fragmentSize = fragmentSize
+	return f
+}
+
+func (f *HighlighterField) FragmentOffset(fragmentOffset int) *HighlighterField {
+	f.fragmentOffset = fragmentOffset
+	return f
+}
+
+func (f *HighlighterField) NumOfFragments(numOfFragments int) *HighlighterField {
+	f.numOfFragments = numOfFragments
+	return f
+}
+
+func (f *HighlighterField) HighlightFilter(highlightFilter bool) *HighlighterField {
+	f.highlightFilter = &highlightFilter
+	return f
+}
+
+func (f *HighlighterField) Order(order string) *HighlighterField {
+	f.order = &order
+	return f
+}
+
+func (f *HighlighterField) RequireFieldMatch(requireFieldMatch bool) *HighlighterField {
+	f.requireFieldMatch = &requireFieldMatch
+	return f
+}
+
+func (f *HighlighterField) BoundaryMaxScan(boundaryMaxScan int) *HighlighterField {
+	f.boundaryMaxScan = boundaryMaxScan
+	return f
+}
+
+func (f *HighlighterField) BoundaryChars(boundaryChars ...rune) *HighlighterField {
+	f.boundaryChars = make([]rune, 0)
+	f.boundaryChars = append(f.boundaryChars, boundaryChars...)
+	return f
+}
+
+func (f *HighlighterField) HighlighterType(highlighterType string) *HighlighterField {
+	f.highlighterType = &highlighterType
+	return f
+}
+
+func (f *HighlighterField) Fragmenter(fragmenter string) *HighlighterField {
+	f.fragmenter = &fragmenter
+	return f
+}
+
+func (f *HighlighterField) HighlightQuery(highlightQuery Query) *HighlighterField {
+	f.highlightQuery = highlightQuery
+	return f
+}
+
+func (f *HighlighterField) NoMatchSize(noMatchSize int) *HighlighterField {
+	f.noMatchSize = &noMatchSize
+	return f
+}
+
+func (f *HighlighterField) Options(options map[string]interface{}) *HighlighterField {
+	f.options = options
+	return f
+}
+
+func (f *HighlighterField) MatchedFields(matchedFields ...string) *HighlighterField {
+	f.matchedFields = make([]string, 0)
+	f.matchedFields = append(f.matchedFields, matchedFields...)
+	return f
+}
+
+func (f *HighlighterField) PhraseLimit(phraseLimit int) *HighlighterField {
+	f.phraseLimit = &phraseLimit
+	return f
+}
+
+func (f *HighlighterField) ForceSource(forceSource bool) *HighlighterField {
+	f.forceSource = &forceSource
+	return f
+}
+
+func (f *HighlighterField) Source() interface{} {
+	source := make(map[string]interface{})
+
+	if f.preTags != nil && len(f.preTags) > 0 {
+		source["pre_tags"] = f.preTags
+	}
+	if f.postTags != nil && len(f.postTags) > 0 {
+		source["post_tags"] = f.postTags
+	}
+	if f.fragmentSize != -1 {
+		source["fragment_size"] = f.fragmentSize
+	}
+	if f.numOfFragments != -1 {
+		source["number_of_fragments"] = f.numOfFragments
+	}
+	if f.fragmentOffset != -1 {
+		source["fragment_offset"] = f.fragmentOffset
+	}
+	if f.highlightFilter != nil {
+		source["highlight_filter"] = *f.highlightFilter
+	}
+	if f.order != nil {
+		source["order"] = *f.order
+	}
+	if f.requireFieldMatch != nil {
+		source["require_field_match"] = *f.requireFieldMatch
+	}
+	if f.boundaryMaxScan != -1 {
+		source["boundary_max_scan"] = f.boundaryMaxScan
+	}
+	if f.boundaryChars != nil && len(f.boundaryChars) > 0 {
+		source["boundary_chars"] = f.boundaryChars
+	}
+	if f.highlighterType != nil {
+		source["type"] = *f.highlighterType
+	}
+	if f.fragmenter != nil {
+		source["fragmenter"] = *f.fragmenter
+	}
+	if f.highlightQuery != nil {
+		source["highlight_query"] = f.highlightQuery.Source()
+	}
+	if f.noMatchSize != nil {
+		source["no_match_size"] = *f.noMatchSize
+	}
+	if f.matchedFields != nil && len(f.matchedFields) > 0 {
+		source["matched_fields"] = f.matchedFields
+	}
+	if f.phraseLimit != nil {
+		source["phrase_limit"] = *f.phraseLimit
+	}
+	if f.options != nil && len(f.options) > 0 {
+		source["options"] = f.options
+	}
+	if f.forceSource != nil {
+		source["force_source"] = *f.forceSource
+	}
+
+	return source
+}

+ 217 - 0
github.com/olivere/elastic/index.go

@@ -0,0 +1,217 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
+// IndexResult is the result of indexing a document in Elasticsearch.
+type IndexResult struct {
+	Index   string `json:"_index"`
+	Type    string `json:"_type"`
+	Id      string `json:"_id"`
+	Version int    `json:"_version"`
+	Created bool   `json:"created"`
+}
+
+// IndexService adds documents to Elasticsearch.
+type IndexService struct {
+	client      *Client
+	index       string
+	_type       string
+	id          string
+	routing     string
+	parent      string
+	opType      string
+	refresh     *bool
+	version     *int64
+	versionType string
+	timestamp   string
+	ttl         string
+	timeout     string
+	bodyString  string
+	bodyJson    interface{}
+	pretty      bool
+}
+
+func NewIndexService(client *Client) *IndexService {
+	builder := &IndexService{
+		client: client,
+	}
+	return builder
+}
+
+func (b *IndexService) Index(name string) *IndexService {
+	b.index = name
+	return b
+}
+
+func (b *IndexService) Type(_type string) *IndexService {
+	b._type = _type
+	return b
+}
+
+func (b *IndexService) Id(id string) *IndexService {
+	b.id = id
+	return b
+}
+
+func (b *IndexService) Routing(routing string) *IndexService {
+	b.routing = routing
+	return b
+}
+
+func (b *IndexService) Parent(parent string) *IndexService {
+	b.parent = parent
+	return b
+}
+
+// OpType is either "create" or "index" (the default).
+func (b *IndexService) OpType(opType string) *IndexService {
+	b.opType = opType
+	return b
+}
+
+func (b *IndexService) Refresh(refresh bool) *IndexService {
+	b.refresh = &refresh
+	return b
+}
+
+func (b *IndexService) Version(version int64) *IndexService {
+	b.version = &version
+	return b
+}
+
+// VersionType is either "internal" (default), "external",
+// "external_gt", "external_gte", or "force".
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types
+// for details.
+func (b *IndexService) VersionType(versionType string) *IndexService {
+	b.versionType = versionType
+	return b
+}
+
+func (b *IndexService) Timestamp(timestamp string) *IndexService {
+	b.timestamp = timestamp
+	return b
+}
+
+func (b *IndexService) TTL(ttl string) *IndexService {
+	b.ttl = ttl
+	return b
+}
+
+func (b *IndexService) Timeout(timeout string) *IndexService {
+	b.timeout = timeout
+	return b
+}
+
+func (b *IndexService) BodyString(body string) *IndexService {
+	b.bodyString = body
+	return b
+}
+
+func (b *IndexService) BodyJson(json interface{}) *IndexService {
+	b.bodyJson = json
+	return b
+}
+
+func (b *IndexService) Pretty(pretty bool) *IndexService {
+	b.pretty = pretty
+	return b
+}
+
+func (b *IndexService) Do() (*IndexResult, error) {
+	// Build url
+	var path, method string
+	if b.id != "" {
+		// Create document with manual id
+		method = "PUT"
+		path = "/{index}/{type}/{id}"
+	} else {
+		// Automatic ID generation
+		// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/docs-index_.html#index-creation
+		method = "POST"
+		path = "/{index}/{type}/"
+	}
+	path, err := uritemplates.Expand(path, map[string]string{
+		"index": b.index,
+		"type":  b._type,
+		"id":    b.id,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	// Parameters
+	params := make(url.Values)
+	if b.pretty {
+		params.Set("pretty", "true")
+	}
+	if b.routing != "" {
+		params.Set("routing", b.routing)
+	}
+	if b.parent != "" {
+		params.Set("parent", b.parent)
+	}
+	if b.opType != "" {
+		params.Set("op_type", b.opType)
+	}
+	if b.refresh != nil && *b.refresh {
+		params.Set("refresh", "true")
+	}
+	if b.version != nil {
+		params.Set("version", fmt.Sprintf("%d", *b.version))
+	}
+	if b.versionType != "" {
+		params.Set("version_type", b.versionType)
+	}
+	if b.timestamp != "" {
+		params.Set("timestamp", b.timestamp)
+	}
+	if b.ttl != "" {
+		params.Set("ttl", b.ttl)
+	}
+	if b.timeout != "" {
+		params.Set("timeout", b.timeout)
+	}
+
+	/*
+		routing string
+		parent string
+		opType string
+		refresh *bool
+		version *int64
+		versionType string
+		timestamp string
+		ttl string
+	*/
+
+	// Body
+	var body interface{}
+	if b.bodyJson != nil {
+		body = b.bodyJson
+	} else {
+		body = b.bodyString
+	}
+
+	// Get response
+	res, err := b.client.PerformRequest(method, path, params, body)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return result
+	ret := new(IndexResult)
+	if err := json.Unmarshal(res.Body, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}

+ 145 - 0
github.com/olivere/elastic/index_close.go

@@ -0,0 +1,145 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
+// CloseIndexService closes an index.
+// See documentation at http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/indices-open-close.html.
// CloseIndexService closes an index.
// See documentation at http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/indices-open-close.html.
// Pointer fields use nil to mean "not set"; only set values are
// sent as query parameters.
type CloseIndexService struct {
	client            *Client
	pretty            bool
	index             string // required; validated in Validate
	ignoreUnavailable *bool
	allowNoIndices    *bool
	expandWildcards   string
	timeout           string
	masterTimeout     string
}
+
+// NewCloseIndexService creates a new CloseIndexService.
+func NewCloseIndexService(client *Client) *CloseIndexService {
+	return &CloseIndexService{client: client}
+}
+
+// Index is the name of the index.
+func (s *CloseIndexService) Index(index string) *CloseIndexService {
+	s.index = index
+	return s
+}
+
+// Timeout is an explicit operation timeout.
+func (s *CloseIndexService) Timeout(timeout string) *CloseIndexService {
+	s.timeout = timeout
+	return s
+}
+
+// MasterTimeout specifies the timeout for connection to master.
+func (s *CloseIndexService) MasterTimeout(masterTimeout string) *CloseIndexService {
+	s.masterTimeout = masterTimeout
+	return s
+}
+
+// IgnoreUnavailable indicates whether specified concrete indices should be
+// ignored when unavailable (missing or closed).
+func (s *CloseIndexService) IgnoreUnavailable(ignoreUnavailable bool) *CloseIndexService {
+	s.ignoreUnavailable = &ignoreUnavailable
+	return s
+}
+
+// AllowNoIndices indicates whether to ignore if a wildcard indices
+// expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified).
+func (s *CloseIndexService) AllowNoIndices(allowNoIndices bool) *CloseIndexService {
+	s.allowNoIndices = &allowNoIndices
+	return s
+}
+
+// ExpandWildcards indicates whether to expand wildcard expression to
+// concrete indices that are open, closed or both.
+func (s *CloseIndexService) ExpandWildcards(expandWildcards string) *CloseIndexService {
+	s.expandWildcards = expandWildcards
+	return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *CloseIndexService) buildURL() (string, url.Values, error) {
+	// Build URL
+	path, err := uritemplates.Expand("/{index}/_close", map[string]string{
+		"index": s.index,
+	})
+	if err != nil {
+		return "", url.Values{}, err
+	}
+
+	// Add query string parameters
+	params := url.Values{}
+	if s.allowNoIndices != nil {
+		params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+	}
+	if s.expandWildcards != "" {
+		params.Set("expand_wildcards", s.expandWildcards)
+	}
+	if s.timeout != "" {
+		params.Set("timeout", s.timeout)
+	}
+	if s.masterTimeout != "" {
+		params.Set("master_timeout", s.masterTimeout)
+	}
+	if s.ignoreUnavailable != nil {
+		params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+	}
+
+	return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *CloseIndexService) Validate() error {
+	var invalid []string
+	if s.index == "" {
+		invalid = append(invalid, "Index")
+	}
+	if len(invalid) > 0 {
+		return fmt.Errorf("missing required fields: %v", invalid)
+	}
+	return nil
+}
+
+// Do executes the operation.
+func (s *CloseIndexService) Do() (*CloseIndexResponse, error) {
+	// Check pre-conditions
+	if err := s.Validate(); err != nil {
+		return nil, err
+	}
+
+	// Get URL for request
+	path, params, err := s.buildURL()
+	if err != nil {
+		return nil, err
+	}
+
+	// Get HTTP response
+	res, err := s.client.PerformRequest("POST", path, params, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return operation response
+	ret := new(CloseIndexResponse)
+	if err := json.Unmarshal(res.Body, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
// CloseIndexResponse is the response of CloseIndexService.Do.
type CloseIndexResponse struct {
	Acknowledged bool `json:"acknowledged"` // true if the cluster acknowledged the request
}

+ 50 - 0
github.com/olivere/elastic/index_exists.go

@@ -0,0 +1,50 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"fmt"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
// IndexExistsService checks whether an index exists via a HEAD request.
type IndexExistsService struct {
	client *Client
	index  string
}
+
+func NewIndexExistsService(client *Client) *IndexExistsService {
+	builder := &IndexExistsService{
+		client: client,
+	}
+	return builder
+}
+
+func (b *IndexExistsService) Index(index string) *IndexExistsService {
+	b.index = index
+	return b
+}
+
+func (b *IndexExistsService) Do() (bool, error) {
+	// Build url
+	path, err := uritemplates.Expand("/{index}", map[string]string{
+		"index": b.index,
+	})
+	if err != nil {
+		return false, err
+	}
+
+	// Get response
+	res, err := b.client.PerformRequest("HEAD", path, nil, nil)
+	if err != nil {
+		return false, err
+	}
+	if res.StatusCode == 200 {
+		return true, nil
+	} else if res.StatusCode == 404 {
+		return false, nil
+	}
+	return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode)
+}

+ 186 - 0
github.com/olivere/elastic/index_get.go

@@ -0,0 +1,186 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"log"
+	"net/url"
+	"strings"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
// Reference the imported packages with blank assignments so the file
// compiles even when code using them is edited out.
var (
	_ = fmt.Print
	_ = log.Print
	_ = strings.Index
	_ = uritemplates.Expand
	_ = url.Parse
)
+
+// IndicesGetService retrieves information about one or more indices.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-get-index.html.
+type IndicesGetService struct {
+	client            *Client
+	pretty            bool
+	index             []string
+	feature           []string
+	expandWildcards   string
+	local             *bool
+	ignoreUnavailable *bool
+	allowNoIndices    *bool
+}
+
+// NewIndicesGetService creates a new IndicesGetService.
+func NewIndicesGetService(client *Client) *IndicesGetService {
+	return &IndicesGetService{
+		client:  client,
+		index:   make([]string, 0),
+		feature: make([]string, 0),
+	}
+}
+
+// Index is a list of index names. Use _all to retrieve information about
+// all indices of a cluster.
+func (s *IndicesGetService) Index(index ...string) *IndicesGetService {
+	s.index = append(s.index, index...)
+	return s
+}
+
+// Feature is a list of features (e.g. _settings,_mappings,_warmers, and _aliases).
+func (s *IndicesGetService) Feature(feature ...string) *IndicesGetService {
+	s.feature = append(s.feature, feature...)
+	return s
+}
+
+// ExpandWildcards indicates whether wildcard expressions should
+// get expanded to open or closed indices (default: open).
+func (s *IndicesGetService) ExpandWildcards(expandWildcards string) *IndicesGetService {
+	s.expandWildcards = expandWildcards
+	return s
+}
+
+// Local indicates whether to return local information (do not retrieve
+// the state from master node (default: false)).
+func (s *IndicesGetService) Local(local bool) *IndicesGetService {
+	s.local = &local
+	return s
+}
+
+// IgnoreUnavailable indicates whether to ignore unavailable indexes (default: false).
+func (s *IndicesGetService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesGetService {
+	s.ignoreUnavailable = &ignoreUnavailable
+	return s
+}
+
+// AllowNoIndices indicates whether to ignore if a wildcard expression
+// resolves to no concrete indices (default: false).
+func (s *IndicesGetService) AllowNoIndices(allowNoIndices bool) *IndicesGetService {
+	s.allowNoIndices = &allowNoIndices
+	return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *IndicesGetService) Pretty(pretty bool) *IndicesGetService {
+	s.pretty = pretty
+	return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IndicesGetService) buildURL() (string, url.Values, error) {
+	var err error
+	var path string
+	var index []string
+
+	if len(s.index) > 0 {
+		index = s.index
+	} else {
+		index = []string{"_all"}
+	}
+
+	if len(s.feature) > 0 {
+		// Build URL
+		path, err = uritemplates.Expand("/{index}/{feature}", map[string]string{
+			"index":   strings.Join(index, ","),
+			"feature": strings.Join(s.feature, ","),
+		})
+	} else {
+		// Build URL
+		path, err = uritemplates.Expand("/{index}", map[string]string{
+			"index": strings.Join(index, ","),
+		})
+	}
+	if err != nil {
+		return "", url.Values{}, err
+	}
+
+	// Add query string parameters
+	params := url.Values{}
+	if s.pretty {
+		params.Set("pretty", "1")
+	}
+	if s.expandWildcards != "" {
+		params.Set("expand_wildcards", s.expandWildcards)
+	}
+	if s.local != nil {
+		params.Set("local", fmt.Sprintf("%v", *s.local))
+	}
+	if s.ignoreUnavailable != nil {
+		params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+	}
+	if s.allowNoIndices != nil {
+		params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+	}
+	return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IndicesGetService) Validate() error {
+	var invalid []string
+	if len(s.index) == 0 {
+		invalid = append(invalid, "Index")
+	}
+	if len(invalid) > 0 {
+		return fmt.Errorf("missing required fields: %v", invalid)
+	}
+	return nil
+}
+
+// Do executes the operation.
+func (s *IndicesGetService) Do() (map[string]*IndicesGetResponse, error) {
+	// Check pre-conditions
+	if err := s.Validate(); err != nil {
+		return nil, err
+	}
+
+	// Get URL for request
+	path, params, err := s.buildURL()
+	if err != nil {
+		return nil, err
+	}
+
+	// Get HTTP response
+	res, err := s.client.PerformRequest("GET", path, params, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return operation response
+	var ret map[string]*IndicesGetResponse
+	if err := json.Unmarshal(res.Body, &ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
// IndicesGetResponse is part of the response of IndicesGetService.Do.
// One instance is returned per index, keyed by index name.
type IndicesGetResponse struct {
	Aliases  map[string]interface{} `json:"aliases"`
	Mappings map[string]interface{} `json:"mappings"`
	Settings map[string]interface{} `json:"settings"`
	Warmers  map[string]interface{} `json:"warmers"`
}

+ 189 - 0
github.com/olivere/elastic/index_get_settings.go

@@ -0,0 +1,189 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"log"
+	"net/url"
+	"strings"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
// Reference the imported packages with blank assignments so the file
// compiles even when code using them is edited out.
var (
	_ = fmt.Print
	_ = log.Print
	_ = strings.Index
	_ = uritemplates.Expand
	_ = url.Parse
)
+
+// IndicesGetSettingsService allows to retrieve settings of one
+// or more indices.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/indices-get-settings.html.
// IndicesGetSettingsService allows to retrieve settings of one
// or more indices.
// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/indices-get-settings.html.
// Pointer fields use nil to mean "not set"; only set values are
// sent as query parameters.
type IndicesGetSettingsService struct {
	client            *Client
	pretty            bool
	index             []string // index names; defaults to _all in buildURL
	name              []string // setting names to include
	ignoreUnavailable *bool
	allowNoIndices    *bool
	expandWildcards   string
	flatSettings      *bool
	local             *bool
}
+
+// NewIndicesGetSettingsService creates a new IndicesGetSettingsService.
+func NewIndicesGetSettingsService(client *Client) *IndicesGetSettingsService {
+	return &IndicesGetSettingsService{
+		client: client,
+		index:  make([]string, 0),
+		name:   make([]string, 0),
+	}
+}
+
+// Index is a list of index names; use `_all` or empty string to perform the operation on all indices.
+func (s *IndicesGetSettingsService) Index(index ...string) *IndicesGetSettingsService {
+	s.index = append(s.index, index...)
+	return s
+}
+
+// Name are the names of the settings that should be included.
+func (s *IndicesGetSettingsService) Name(name ...string) *IndicesGetSettingsService {
+	s.name = append(s.name, name...)
+	return s
+}
+
+// IgnoreUnavailable indicates whether specified concrete indices should
+// be ignored when unavailable (missing or closed).
+func (s *IndicesGetSettingsService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesGetSettingsService {
+	s.ignoreUnavailable = &ignoreUnavailable
+	return s
+}
+
+// AllowNoIndices indicates whether to ignore if a wildcard indices
+// expression resolves into no concrete indices.
+// (This includes `_all` string or when no indices have been specified).
+func (s *IndicesGetSettingsService) AllowNoIndices(allowNoIndices bool) *IndicesGetSettingsService {
+	s.allowNoIndices = &allowNoIndices
+	return s
+}
+
+// ExpandWildcards indicates whether to expand wildcard expression
+// to concrete indices that are open, closed or both.
+// Options: open, closed, none, all. Default: open,closed.
+func (s *IndicesGetSettingsService) ExpandWildcards(expandWildcards string) *IndicesGetSettingsService {
+	s.expandWildcards = expandWildcards
+	return s
+}
+
+// FlatSettings indicates whether to return settings in flat format (default: false).
+func (s *IndicesGetSettingsService) FlatSettings(flatSettings bool) *IndicesGetSettingsService {
+	s.flatSettings = &flatSettings
+	return s
+}
+
+// Local indicates whether to return local information, do not retrieve
+// the state from master node (default: false).
+func (s *IndicesGetSettingsService) Local(local bool) *IndicesGetSettingsService {
+	s.local = &local
+	return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *IndicesGetSettingsService) Pretty(pretty bool) *IndicesGetSettingsService {
+	s.pretty = pretty
+	return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IndicesGetSettingsService) buildURL() (string, url.Values, error) {
+	var err error
+	var path string
+	var index []string
+
+	if len(s.index) > 0 {
+		index = s.index
+	} else {
+		index = []string{"_all"}
+	}
+
+	if len(s.name) > 0 {
+		// Build URL
+		path, err = uritemplates.Expand("/{index}/_settings/{name}", map[string]string{
+			"index": strings.Join(index, ","),
+			"name":  strings.Join(s.name, ","),
+		})
+	} else {
+		// Build URL
+		path, err = uritemplates.Expand("/{index}/_settings", map[string]string{
+			"index": strings.Join(index, ","),
+		})
+	}
+	if err != nil {
+		return "", url.Values{}, err
+	}
+
+	// Add query string parameters
+	params := url.Values{}
+	if s.pretty {
+		params.Set("pretty", "1")
+	}
+	if s.ignoreUnavailable != nil {
+		params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+	}
+	if s.allowNoIndices != nil {
+		params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+	}
+	if s.expandWildcards != "" {
+		params.Set("expand_wildcards", s.expandWildcards)
+	}
+	if s.flatSettings != nil {
+		params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
+	}
+	if s.local != nil {
+		params.Set("local", fmt.Sprintf("%v", *s.local))
+	}
+	return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IndicesGetSettingsService) Validate() error {
+	return nil
+}
+
+// Do executes the operation.
+func (s *IndicesGetSettingsService) Do() (map[string]*IndicesGetSettingsResponse, error) {
+	// Check pre-conditions
+	if err := s.Validate(); err != nil {
+		return nil, err
+	}
+
+	// Get URL for request
+	path, params, err := s.buildURL()
+	if err != nil {
+		return nil, err
+	}
+
+	// Get HTTP response
+	res, err := s.client.PerformRequest("GET", path, params, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return operation response
+	var ret map[string]*IndicesGetSettingsResponse
+	if err := json.Unmarshal(res.Body, &ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
// IndicesGetSettingsResponse is the response of IndicesGetSettingsService.Do.
// One instance is returned per index, keyed by index name.
type IndicesGetSettingsResponse struct {
	Settings map[string]interface{} `json:"settings"`
}

+ 146 - 0
github.com/olivere/elastic/index_open.go

@@ -0,0 +1,146 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
+// OpenIndexService opens an index.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/indices-open-close.html.
+type OpenIndexService struct {
+	client            *Client
+	pretty            bool   // indent the JSON response for readability
+	index             string // name of the index to open (required)
+	expandWildcards   string
+	timeout           string
+	masterTimeout     string
+	ignoreUnavailable *bool // pointer so "unset" can be told apart from false
+	allowNoIndices    *bool // pointer so "unset" can be told apart from false
+}
+
+// NewOpenIndexService creates a new OpenIndexService.
+func NewOpenIndexService(client *Client) *OpenIndexService {
+	return &OpenIndexService{client: client}
+}
+
+// Index is the name of the index to open. It is required.
+func (s *OpenIndexService) Index(index string) *OpenIndexService {
+	s.index = index
+	return s
+}
+
+// Timeout is an explicit operation timeout.
+func (s *OpenIndexService) Timeout(timeout string) *OpenIndexService {
+	s.timeout = timeout
+	return s
+}
+
+// MasterTimeout specifies the timeout for connection to master.
+func (s *OpenIndexService) MasterTimeout(masterTimeout string) *OpenIndexService {
+	s.masterTimeout = masterTimeout
+	return s
+}
+
+// IgnoreUnavailable indicates whether specified concrete indices should
+// be ignored when unavailable (missing or closed).
+func (s *OpenIndexService) IgnoreUnavailable(ignoreUnavailable bool) *OpenIndexService {
+	s.ignoreUnavailable = &ignoreUnavailable
+	return s
+}
+
+// AllowNoIndices indicates whether to ignore if a wildcard indices
+// expression resolves into no concrete indices.
+// (This includes `_all` string or when no indices have been specified).
+func (s *OpenIndexService) AllowNoIndices(allowNoIndices bool) *OpenIndexService {
+	s.allowNoIndices = &allowNoIndices
+	return s
+}
+
+// ExpandWildcards indicates whether to expand wildcard expression to
+// concrete indices that are open, closed or both.
+func (s *OpenIndexService) ExpandWildcards(expandWildcards string) *OpenIndexService {
+	s.expandWildcards = expandWildcards
+	return s
+}
+
+// buildURL builds the URL for the operation: the endpoint path and
+// the query-string parameters derived from the service's settings.
+func (s *OpenIndexService) buildURL() (string, url.Values, error) {
+	// Build URL
+	path, err := uritemplates.Expand("/{index}/_open", map[string]string{
+		"index": s.index,
+	})
+	if err != nil {
+		return "", url.Values{}, err
+	}
+
+	// Add query string parameters
+	params := url.Values{}
+	// The pretty flag exists on the struct but was never serialized;
+	// emit it here for consistency with the other services in this package.
+	if s.pretty {
+		params.Set("pretty", "1")
+	}
+	if s.timeout != "" {
+		params.Set("timeout", s.timeout)
+	}
+	if s.masterTimeout != "" {
+		params.Set("master_timeout", s.masterTimeout)
+	}
+	if s.ignoreUnavailable != nil {
+		params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+	}
+	if s.allowNoIndices != nil {
+		params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+	}
+	if s.expandWildcards != "" {
+		params.Set("expand_wildcards", s.expandWildcards)
+	}
+
+	return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *OpenIndexService) Validate() error {
+	var invalid []string
+	if s.index == "" {
+		invalid = append(invalid, "Index")
+	}
+	if len(invalid) > 0 {
+		return fmt.Errorf("missing required fields: %v", invalid)
+	}
+	return nil
+}
+
+// Do executes the operation.
+// It issues a POST to /{index}/_open and decodes the acknowledgement.
+func (s *OpenIndexService) Do() (*OpenIndexResponse, error) {
+	// Check pre-conditions
+	if err := s.Validate(); err != nil {
+		return nil, err
+	}
+
+	// Get URL for request
+	path, params, err := s.buildURL()
+	if err != nil {
+		return nil, err
+	}
+
+	// Get HTTP response
+	res, err := s.client.PerformRequest("POST", path, params, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return operation response
+	ret := new(OpenIndexResponse)
+	if err := json.Unmarshal(res.Body, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// OpenIndexResponse is the response of OpenIndexService.Do.
+type OpenIndexResponse struct {
+	// Acknowledged reports whether the cluster accepted the open request.
+	Acknowledged bool `json:"acknowledged"`
+}

+ 122 - 0
github.com/olivere/elastic/indices_delete_template.go

@@ -0,0 +1,122 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
+// IndicesDeleteTemplateService deletes index templates.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/indices-templates.html.
+type IndicesDeleteTemplateService struct {
+	client        *Client
+	pretty        bool   // indent the JSON response for readability
+	name          string // name of the template to delete (required)
+	timeout       string
+	masterTimeout string
+}
+
+// NewIndicesDeleteTemplateService creates a new IndicesDeleteTemplateService.
+func NewIndicesDeleteTemplateService(client *Client) *IndicesDeleteTemplateService {
+	return &IndicesDeleteTemplateService{
+		client: client,
+	}
+}
+
+// Name is the name of the template to delete. It is required.
+func (s *IndicesDeleteTemplateService) Name(name string) *IndicesDeleteTemplateService {
+	s.name = name
+	return s
+}
+
+// Timeout is an explicit operation timeout.
+func (s *IndicesDeleteTemplateService) Timeout(timeout string) *IndicesDeleteTemplateService {
+	s.timeout = timeout
+	return s
+}
+
+// MasterTimeout specifies the timeout for connection to master.
+func (s *IndicesDeleteTemplateService) MasterTimeout(masterTimeout string) *IndicesDeleteTemplateService {
+	s.masterTimeout = masterTimeout
+	return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *IndicesDeleteTemplateService) Pretty(pretty bool) *IndicesDeleteTemplateService {
+	s.pretty = pretty
+	return s
+}
+
+// buildURL constructs the endpoint path and query string for the
+// delete-template call.
+func (s *IndicesDeleteTemplateService) buildURL() (string, url.Values, error) {
+	// Assemble the query string first; none of it depends on the path.
+	values := url.Values{}
+	if s.pretty {
+		values.Set("pretty", "1")
+	}
+	if s.timeout != "" {
+		values.Set("timeout", s.timeout)
+	}
+	if s.masterTimeout != "" {
+		values.Set("master_timeout", s.masterTimeout)
+	}
+
+	// Expand the template name into the URL path.
+	path, err := uritemplates.Expand("/_template/{name}", map[string]string{"name": s.name})
+	if err != nil {
+		return "", url.Values{}, err
+	}
+	return path, values, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IndicesDeleteTemplateService) Validate() error {
+	var invalid []string
+	if s.name == "" {
+		invalid = append(invalid, "Name")
+	}
+	if len(invalid) > 0 {
+		return fmt.Errorf("missing required fields: %v", invalid)
+	}
+	return nil
+}
+
+// Do executes the operation.
+// It issues a DELETE to /_template/{name} and decodes the acknowledgement.
+func (s *IndicesDeleteTemplateService) Do() (*IndicesDeleteTemplateResponse, error) {
+	// Check pre-conditions
+	if err := s.Validate(); err != nil {
+		return nil, err
+	}
+
+	// Get URL for request
+	path, params, err := s.buildURL()
+	if err != nil {
+		return nil, err
+	}
+
+	// Get HTTP response
+	res, err := s.client.PerformRequest("DELETE", path, params, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return operation response
+	ret := new(IndicesDeleteTemplateResponse)
+	if err := json.Unmarshal(res.Body, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// IndicesDeleteTemplateResponse is the response of IndicesDeleteTemplateService.Do.
+type IndicesDeleteTemplateResponse struct {
+	// Acknowledged reports whether the cluster accepted the deletion.
+	Acknowledged bool `json:"acknowledged,omitempty"`
+}

+ 107 - 0
github.com/olivere/elastic/indices_exists_template.go

@@ -0,0 +1,107 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"fmt"
+	"net/url"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
+// IndicesExistsTemplateService checks if a given template exists.
+// See http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html#indices-templates-exists
+// for documentation.
+type IndicesExistsTemplateService struct {
+	client *Client
+	pretty bool   // indent the JSON response for readability
+	name   string // name of the template to check (required)
+	local  *bool  // pointer so "unset" can be told apart from false
+}
+
+// NewIndicesExistsTemplateService creates a new IndicesExistsTemplateService.
+func NewIndicesExistsTemplateService(client *Client) *IndicesExistsTemplateService {
+	return &IndicesExistsTemplateService{
+		client: client,
+	}
+}
+
+// Name is the name of the template to check. It is required.
+func (s *IndicesExistsTemplateService) Name(name string) *IndicesExistsTemplateService {
+	s.name = name
+	return s
+}
+
+// Local indicates whether to return local information, i.e. do not retrieve
+// the state from master node (default: false).
+func (s *IndicesExistsTemplateService) Local(local bool) *IndicesExistsTemplateService {
+	s.local = &local
+	return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *IndicesExistsTemplateService) Pretty(pretty bool) *IndicesExistsTemplateService {
+	s.pretty = pretty
+	return s
+}
+
+// buildURL builds the URL for the operation: the endpoint path and
+// the query-string parameters derived from the service's settings.
+func (s *IndicesExistsTemplateService) buildURL() (string, url.Values, error) {
+	// Build URL
+	path, err := uritemplates.Expand("/_template/{name}", map[string]string{
+		"name": s.name,
+	})
+	if err != nil {
+		return "", url.Values{}, err
+	}
+
+	// Add query string parameters
+	params := url.Values{}
+	if s.pretty {
+		params.Set("pretty", "1")
+	}
+	if s.local != nil {
+		params.Set("local", fmt.Sprintf("%v", *s.local))
+	}
+	return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IndicesExistsTemplateService) Validate() error {
+	var invalid []string
+	// Name is the only required field.
+	if s.name == "" {
+		invalid = append(invalid, "Name")
+	}
+	if len(invalid) > 0 {
+		return fmt.Errorf("missing required fields: %v", invalid)
+	}
+	return nil
+}
+
+// Do executes the operation. It issues a HEAD request and maps the
+// HTTP status code to a boolean: 200 means the template exists,
+// 404 means it does not; any other code is an error.
+func (s *IndicesExistsTemplateService) Do() (bool, error) {
+	// Check pre-conditions
+	if err := s.Validate(); err != nil {
+		return false, err
+	}
+
+	// Get URL for request
+	path, params, err := s.buildURL()
+	if err != nil {
+		return false, err
+	}
+
+	// Get HTTP response
+	res, err := s.client.PerformRequest("HEAD", path, params, nil)
+	if err != nil {
+		return false, err
+	}
+
+	// Translate the status code into existence.
+	switch res.StatusCode {
+	case 200:
+		return true, nil
+	case 404:
+		return false, nil
+	default:
+		return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode)
+	}
+}

+ 155 - 0
github.com/olivere/elastic/indices_exists_type.go

@@ -0,0 +1,155 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"fmt"
+	"net/url"
+	"strings"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
+// IndicesExistsTypeService checks if one or more types exist in one or more indices.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-types-exists.html.
+type IndicesExistsTypeService struct {
+	client            *Client
+	pretty            bool
+	index             []string // index names to check (required, at least one)
+	typ               []string // document types to check (required, at least one)
+	allowNoIndices    *bool
+	expandWildcards   string
+	local             *bool
+	ignoreUnavailable *bool
+}
+
+// NewIndicesExistsTypeService creates a new IndicesExistsTypeService.
+func NewIndicesExistsTypeService(client *Client) *IndicesExistsTypeService {
+	return &IndicesExistsTypeService{
+		client: client,
+		index:  make([]string, 0),
+		typ:    make([]string, 0),
+	}
+}
+
+// Index is a list of index names; use `_all` to check the types across all indices.
+func (s *IndicesExistsTypeService) Index(index ...string) *IndicesExistsTypeService {
+	s.index = append(s.index, index...)
+	return s
+}
+
+// Type is a list of document types to check.
+func (s *IndicesExistsTypeService) Type(typ ...string) *IndicesExistsTypeService {
+	s.typ = append(s.typ, typ...)
+	return s
+}
+
+// IgnoreUnavailable indicates whether specified concrete indices should be
+// ignored when unavailable (missing or closed).
+func (s *IndicesExistsTypeService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesExistsTypeService {
+	s.ignoreUnavailable = &ignoreUnavailable
+	return s
+}
+
+// AllowNoIndices indicates whether to ignore if a wildcard indices
+// expression resolves into no concrete indices.
+// (This includes `_all` string or when no indices have been specified).
+func (s *IndicesExistsTypeService) AllowNoIndices(allowNoIndices bool) *IndicesExistsTypeService {
+	s.allowNoIndices = &allowNoIndices
+	return s
+}
+
+// ExpandWildcards indicates whether to expand wildcard expression to
+// concrete indices that are open, closed or both.
+func (s *IndicesExistsTypeService) ExpandWildcards(expandWildcards string) *IndicesExistsTypeService {
+	s.expandWildcards = expandWildcards
+	return s
+}
+
+// Local specifies whether to return local information, i.e. do not retrieve
+// the state from master node (default: false).
+func (s *IndicesExistsTypeService) Local(local bool) *IndicesExistsTypeService {
+	s.local = &local
+	return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *IndicesExistsTypeService) Pretty(pretty bool) *IndicesExistsTypeService {
+	s.pretty = pretty
+	return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IndicesExistsTypeService) buildURL() (string, url.Values, error) {
+	// Unlike most services in this package, validation happens here,
+	// because Do below does not call Validate itself.
+	if err := s.Validate(); err != nil {
+		return "", url.Values{}, err
+	}
+
+	// Build URL
+	path, err := uritemplates.Expand("/{index}/{type}", map[string]string{
+		"type":  strings.Join(s.typ, ","),
+		"index": strings.Join(s.index, ","),
+	})
+	if err != nil {
+		return "", url.Values{}, err
+	}
+
+	// Add query string parameters
+	params := url.Values{}
+	if s.pretty {
+		params.Set("pretty", "1")
+	}
+	if s.expandWildcards != "" {
+		params.Set("expand_wildcards", s.expandWildcards)
+	}
+	if s.local != nil {
+		params.Set("local", fmt.Sprintf("%v", *s.local))
+	}
+	if s.ignoreUnavailable != nil {
+		params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+	}
+	if s.allowNoIndices != nil {
+		params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+	}
+	return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+// Both Index and Type require at least one entry.
+func (s *IndicesExistsTypeService) Validate() error {
+	var invalid []string
+	if len(s.index) == 0 {
+		invalid = append(invalid, "Index")
+	}
+	if len(s.typ) == 0 {
+		invalid = append(invalid, "Type")
+	}
+	if len(invalid) > 0 {
+		return fmt.Errorf("missing required fields: %v", invalid)
+	}
+	return nil
+}
+
+// Do executes the operation. It reports true if all given types exist
+// in the given indices (HTTP 200) and false if not (HTTP 404).
+// Validation is performed inside buildURL.
+func (s *IndicesExistsTypeService) Do() (bool, error) {
+	// Get URL for request
+	path, params, err := s.buildURL()
+	if err != nil {
+		return false, err
+	}
+
+	// Get HTTP response
+	res, err := s.client.PerformRequest("HEAD", path, params, nil)
+	if err != nil {
+		return false, err
+	}
+
+	// Return operation response
+	if res.StatusCode == 200 {
+		return true, nil
+	} else if res.StatusCode == 404 {
+		return false, nil
+	}
+	return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode)
+}

+ 128 - 0
github.com/olivere/elastic/indices_get_template.go

@@ -0,0 +1,128 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+	"strings"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
+// IndicesGetTemplateService returns an index template.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/indices-templates.html.
+type IndicesGetTemplateService struct {
+	client       *Client
+	pretty       bool
+	name         []string // template names; empty means all templates
+	flatSettings *bool
+	local        *bool
+}
+
+// NewIndicesGetTemplateService creates a new IndicesGetTemplateService.
+func NewIndicesGetTemplateService(client *Client) *IndicesGetTemplateService {
+	return &IndicesGetTemplateService{
+		client: client,
+		name:   make([]string, 0),
+	}
+}
+
+// Name is the name of the index template.
+func (s *IndicesGetTemplateService) Name(name ...string) *IndicesGetTemplateService {
+	s.name = append(s.name, name...)
+	return s
+}
+
+// FlatSettings indicates whether to return settings in flat format (default: false).
+func (s *IndicesGetTemplateService) FlatSettings(flatSettings bool) *IndicesGetTemplateService {
+	s.flatSettings = &flatSettings
+	return s
+}
+
+// Local indicates whether to return local information, i.e. do not retrieve
+// the state from master node (default: false).
+func (s *IndicesGetTemplateService) Local(local bool) *IndicesGetTemplateService {
+	s.local = &local
+	return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *IndicesGetTemplateService) Pretty(pretty bool) *IndicesGetTemplateService {
+	s.pretty = pretty
+	return s
+}
+
+// buildURL constructs the endpoint path and query string. With no
+// template names set, all templates are addressed via /_template.
+func (s *IndicesGetTemplateService) buildURL() (string, url.Values, error) {
+	path := "/_template"
+	if len(s.name) > 0 {
+		var err error
+		path, err = uritemplates.Expand("/_template/{name}", map[string]string{
+			"name": strings.Join(s.name, ","),
+		})
+		if err != nil {
+			return "", url.Values{}, err
+		}
+	}
+
+	// Assemble the query string.
+	params := url.Values{}
+	if s.pretty {
+		params.Set("pretty", "1")
+	}
+	if s.flatSettings != nil {
+		params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
+	}
+	if s.local != nil {
+		params.Set("local", fmt.Sprintf("%v", *s.local))
+	}
+	return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IndicesGetTemplateService) Validate() error {
+	// All parameters of this API are optional, so there is nothing to check.
+	return nil
+}
+
+// Do executes the operation.
+// It returns the matching templates, keyed by template name.
+func (s *IndicesGetTemplateService) Do() (map[string]*IndicesGetTemplateResponse, error) {
+	// Check pre-conditions
+	if err := s.Validate(); err != nil {
+		return nil, err
+	}
+
+	// Get URL for request
+	path, params, err := s.buildURL()
+	if err != nil {
+		return nil, err
+	}
+
+	// Get HTTP response
+	res, err := s.client.PerformRequest("GET", path, params, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return operation response
+	var ret map[string]*IndicesGetTemplateResponse
+	if err := json.Unmarshal(res.Body, &ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// IndicesGetTemplateResponse is the response of IndicesGetTemplateService.Do.
+type IndicesGetTemplateResponse struct {
+	Order    int                    `json:"order,omitempty"`
+	Template string                 `json:"template,omitempty"`
+	Settings map[string]interface{} `json:"settings,omitempty"`
+	Mappings map[string]interface{} `json:"mappings,omitempty"`
+	Aliases  map[string]interface{} `json:"aliases,omitempty"`
+}

+ 179 - 0
github.com/olivere/elastic/indices_put_template.go

@@ -0,0 +1,179 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
+// IndicesPutTemplateService creates or updates index mappings.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/indices-templates.html.
+type IndicesPutTemplateService struct {
+	client        *Client
+	pretty        bool
+	name          string // template name (required)
+	order         interface{}
+	create        *bool
+	timeout       string
+	masterTimeout string
+	flatSettings  *bool
+	bodyJson      interface{} // template definition as a serializable value
+	bodyString    string      // template definition as a raw JSON string
+}
+
+// NewIndicesPutTemplateService creates a new IndicesPutTemplateService.
+func NewIndicesPutTemplateService(client *Client) *IndicesPutTemplateService {
+	return &IndicesPutTemplateService{
+		client: client,
+	}
+}
+
+// Name is the name of the index template. It is required.
+func (s *IndicesPutTemplateService) Name(name string) *IndicesPutTemplateService {
+	s.name = name
+	return s
+}
+
+// Timeout is an explicit operation timeout.
+func (s *IndicesPutTemplateService) Timeout(timeout string) *IndicesPutTemplateService {
+	s.timeout = timeout
+	return s
+}
+
+// MasterTimeout specifies the timeout for connection to master.
+func (s *IndicesPutTemplateService) MasterTimeout(masterTimeout string) *IndicesPutTemplateService {
+	s.masterTimeout = masterTimeout
+	return s
+}
+
+// FlatSettings indicates whether to return settings in flat format (default: false).
+func (s *IndicesPutTemplateService) FlatSettings(flatSettings bool) *IndicesPutTemplateService {
+	s.flatSettings = &flatSettings
+	return s
+}
+
+// Order is the order for this template when merging multiple matching ones
+// (higher numbers are merged later, overriding the lower numbers).
+func (s *IndicesPutTemplateService) Order(order interface{}) *IndicesPutTemplateService {
+	s.order = order
+	return s
+}
+
+// Create indicates whether the index template should only be added if
+// new or can also replace an existing one.
+func (s *IndicesPutTemplateService) Create(create bool) *IndicesPutTemplateService {
+	s.create = &create
+	return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *IndicesPutTemplateService) Pretty(pretty bool) *IndicesPutTemplateService {
+	s.pretty = pretty
+	return s
+}
+
+// BodyJson is documented as: The template definition.
+// Either BodyJson or BodyString must be set.
+func (s *IndicesPutTemplateService) BodyJson(body interface{}) *IndicesPutTemplateService {
+	s.bodyJson = body
+	return s
+}
+
+// BodyString is documented as: The template definition.
+// Either BodyJson or BodyString must be set.
+func (s *IndicesPutTemplateService) BodyString(body string) *IndicesPutTemplateService {
+	s.bodyString = body
+	return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IndicesPutTemplateService) buildURL() (string, url.Values, error) {
+	// Build URL
+	path, err := uritemplates.Expand("/_template/{name}", map[string]string{
+		"name": s.name,
+	})
+	if err != nil {
+		return "", url.Values{}, err
+	}
+
+	// Add query string parameters
+	params := url.Values{}
+	if s.pretty {
+		params.Set("pretty", "1")
+	}
+	// NOTE(review): order is sent as a query parameter here; the
+	// Elasticsearch docs describe `order` as part of the template body —
+	// confirm the server honors it as a parameter.
+	if s.order != nil {
+		params.Set("order", fmt.Sprintf("%v", s.order))
+	}
+	if s.create != nil {
+		params.Set("create", fmt.Sprintf("%v", *s.create))
+	}
+	if s.timeout != "" {
+		params.Set("timeout", s.timeout)
+	}
+	if s.masterTimeout != "" {
+		params.Set("master_timeout", s.masterTimeout)
+	}
+	if s.flatSettings != nil {
+		params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
+	}
+	return path, params, nil
+}
+
+// Validate checks that the required fields are set before the operation
+// is executed: a template name and a body (either JSON or string form).
+func (s *IndicesPutTemplateService) Validate() error {
+	var missing []string
+	if s.name == "" {
+		missing = append(missing, "Name")
+	}
+	if s.bodyString == "" && s.bodyJson == nil {
+		missing = append(missing, "BodyJson")
+	}
+	if len(missing) == 0 {
+		return nil
+	}
+	return fmt.Errorf("missing required fields: %v", missing)
+}
+
+// Do executes the operation.
+// It issues a PUT to /_template/{name} with the template definition as body.
+func (s *IndicesPutTemplateService) Do() (*IndicesPutTemplateResponse, error) {
+	// Check pre-conditions
+	if err := s.Validate(); err != nil {
+		return nil, err
+	}
+
+	// Get URL for request
+	path, params, err := s.buildURL()
+	if err != nil {
+		return nil, err
+	}
+
+	// Setup HTTP request body; bodyJson takes precedence over bodyString.
+	var body interface{}
+	if s.bodyJson != nil {
+		body = s.bodyJson
+	} else {
+		body = s.bodyString
+	}
+
+	// Get HTTP response
+	res, err := s.client.PerformRequest("PUT", path, params, body)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return operation response
+	ret := new(IndicesPutTemplateResponse)
+	if err := json.Unmarshal(res.Body, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// IndicesPutTemplateResponse is the response of IndicesPutTemplateService.Do.
+type IndicesPutTemplateResponse struct {
+	// Acknowledged reports whether the cluster accepted the template.
+	Acknowledged bool `json:"acknowledged,omitempty"`
+}

+ 385 - 0
github.com/olivere/elastic/indices_stats.go

@@ -0,0 +1,385 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+	"strings"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
+// IndicesStatsService provides stats on various metrics of one or more
+// indices. See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-stats.html.
+type IndicesStatsService struct {
+	client           *Client
+	pretty           bool
+	metric           []string // metrics to return; empty means all
+	index            []string // indices to address; empty means all
+	level            string
+	types            []string
+	completionFields []string
+	fielddataFields  []string
+	fields           []string
+	groups           []string
+	human            *bool
+}
+
+// NewIndicesStatsService creates a new IndicesStatsService.
+func NewIndicesStatsService(client *Client) *IndicesStatsService {
+	return &IndicesStatsService{
+		client:           client,
+		index:            make([]string, 0),
+		metric:           make([]string, 0),
+		completionFields: make([]string, 0),
+		fielddataFields:  make([]string, 0),
+		fields:           make([]string, 0),
+		groups:           make([]string, 0),
+		types:            make([]string, 0),
+	}
+}
+
+// Metric limits the information returned the specific metrics. Options are:
+// docs, store, indexing, get, search, completion, fielddata, flush, merge,
+// query_cache, refresh, suggest, and warmer.
+func (s *IndicesStatsService) Metric(metric ...string) *IndicesStatsService {
+	s.metric = append(s.metric, metric...)
+	return s
+}
+
+// Index is the list of index names; use `_all` or empty string to perform
+// the operation on all indices.
+func (s *IndicesStatsService) Index(index ...string) *IndicesStatsService {
+	s.index = append(s.index, index...)
+	return s
+}
+
+// Level returns stats aggregated at cluster, index or shard level.
+func (s *IndicesStatsService) Level(level string) *IndicesStatsService {
+	s.level = level
+	return s
+}
+
+// Types is a list of document types for the `indexing` index metric.
+func (s *IndicesStatsService) Types(types ...string) *IndicesStatsService {
+	s.types = append(s.types, types...)
+	return s
+}
+
+// CompletionFields is a list of fields for `fielddata` and `suggest`
+// index metric (supports wildcards).
+func (s *IndicesStatsService) CompletionFields(completionFields ...string) *IndicesStatsService {
+	s.completionFields = append(s.completionFields, completionFields...)
+	return s
+}
+
+// FielddataFields is a list of fields for `fielddata` index metric (supports wildcards).
+func (s *IndicesStatsService) FielddataFields(fielddataFields ...string) *IndicesStatsService {
+	s.fielddataFields = append(s.fielddataFields, fielddataFields...)
+	return s
+}
+
+// Fields is a list of fields for `fielddata` and `completion` index metric
+// (supports wildcards).
+func (s *IndicesStatsService) Fields(fields ...string) *IndicesStatsService {
+	s.fields = append(s.fields, fields...)
+	return s
+}
+
+// Groups is a list of search groups for `search` index metric.
+func (s *IndicesStatsService) Groups(groups ...string) *IndicesStatsService {
+	s.groups = append(s.groups, groups...)
+	return s
+}
+
+// Human indicates whether to return time and byte values in human-readable format.
+func (s *IndicesStatsService) Human(human bool) *IndicesStatsService {
+	s.human = &human
+	return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *IndicesStatsService) Pretty(pretty bool) *IndicesStatsService {
+	s.pretty = pretty
+	return s
+}
+
+// buildURL builds the URL for the operation. The path takes one of four
+// shapes depending on whether indices and/or metrics were specified:
+// /{index}/_stats/{metric}, /{index}/_stats, /_stats/{metric}, or /_stats.
+func (s *IndicesStatsService) buildURL() (string, url.Values, error) {
+	var err error
+	var path string
+	if len(s.index) > 0 && len(s.metric) > 0 {
+		path, err = uritemplates.Expand("/{index}/_stats/{metric}", map[string]string{
+			"index":  strings.Join(s.index, ","),
+			"metric": strings.Join(s.metric, ","),
+		})
+	} else if len(s.index) > 0 {
+		path, err = uritemplates.Expand("/{index}/_stats", map[string]string{
+			"index": strings.Join(s.index, ","),
+		})
+	} else if len(s.metric) > 0 {
+		path, err = uritemplates.Expand("/_stats/{metric}", map[string]string{
+			"metric": strings.Join(s.metric, ","),
+		})
+	} else {
+		path = "/_stats"
+	}
+	if err != nil {
+		return "", url.Values{}, err
+	}
+
+	// Add query string parameters
+	params := url.Values{}
+	if s.pretty {
+		params.Set("pretty", "1")
+	}
+	if len(s.groups) > 0 {
+		params.Set("groups", strings.Join(s.groups, ","))
+	}
+	if s.human != nil {
+		params.Set("human", fmt.Sprintf("%v", *s.human))
+	}
+	if s.level != "" {
+		params.Set("level", s.level)
+	}
+	if len(s.types) > 0 {
+		params.Set("types", strings.Join(s.types, ","))
+	}
+	if len(s.completionFields) > 0 {
+		params.Set("completion_fields", strings.Join(s.completionFields, ","))
+	}
+	if len(s.fielddataFields) > 0 {
+		params.Set("fielddata_fields", strings.Join(s.fielddataFields, ","))
+	}
+	if len(s.fields) > 0 {
+		params.Set("fields", strings.Join(s.fields, ","))
+	}
+	return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IndicesStatsService) Validate() error {
+	// All parameters of this API are optional, so there is nothing to check.
+	return nil
+}
+
+// Do executes the operation.
+func (s *IndicesStatsService) Do() (*IndicesStatsResponse, error) {
+	// Check pre-conditions
+	if err := s.Validate(); err != nil {
+		return nil, err
+	}
+
+	// Get URL for request
+	path, params, err := s.buildURL()
+	if err != nil {
+		return nil, err
+	}
+
+	// Get HTTP response
+	res, err := s.client.PerformRequest("GET", path, params, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return operation response
+	ret := new(IndicesStatsResponse)
+	if err := json.Unmarshal(res.Body, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// IndicesStatsResponse is the response of IndicesStatsService.Do.
+type IndicesStatsResponse struct {
+	// Shards provides information returned from shards.
+	Shards shardsInfo `json:"_shards"`
+
+	// All provides summary stats about all indices.
+	All *IndexStats `json:"_all,omitempty"`
+
+	// Indices provides a map into the stats of an index. The key of the
+	// map is the index name.
+	Indices map[string]*IndexStats `json:"indices,omitempty"`
+}
+
+// IndexStats is index stats for a specific index.
+type IndexStats struct {
+	Primaries *IndexStatsDetails `json:"primaries,omitempty"`
+	Total     *IndexStatsDetails `json:"total,omitempty"`
+}
+
+// IndexStatsDetails groups the per-metric stats sections of an index.
+// Sections not requested (or not returned by the server) are nil.
+type IndexStatsDetails struct {
+	Docs        *IndexStatsDocs        `json:"docs,omitempty"`
+	Store       *IndexStatsStore       `json:"store,omitempty"`
+	Indexing    *IndexStatsIndexing    `json:"indexing,omitempty"`
+	Get         *IndexStatsGet         `json:"get,omitempty"`
+	Search      *IndexStatsSearch      `json:"search,omitempty"`
+	Merges      *IndexStatsMerges      `json:"merges,omitempty"`
+	Refresh     *IndexStatsRefresh     `json:"refresh,omitempty"`
+	Flush       *IndexStatsFlush       `json:"flush,omitempty"`
+	Warmer      *IndexStatsWarmer      `json:"warmer,omitempty"`
+	FilterCache *IndexStatsFilterCache `json:"filter_cache,omitempty"`
+	IdCache     *IndexStatsIdCache     `json:"id_cache,omitempty"`
+	Fielddata   *IndexStatsFielddata   `json:"fielddata,omitempty"`
+	Percolate   *IndexStatsPercolate   `json:"percolate,omitempty"`
+	Completion  *IndexStatsCompletion  `json:"completion,omitempty"`
+	Segments    *IndexStatsSegments    `json:"segments,omitempty"`
+	Translog    *IndexStatsTranslog    `json:"translog,omitempty"`
+	Suggest     *IndexStatsSuggest     `json:"suggest,omitempty"`
+	QueryCache  *IndexStatsQueryCache  `json:"query_cache,omitempty"`
+}
+
+// IndexStatsDocs holds document counts of an index.
+type IndexStatsDocs struct {
+	Count   int64 `json:"count,omitempty"`
+	Deleted int64 `json:"deleted,omitempty"`
+}
+
+// IndexStatsStore holds on-disk storage stats of an index.
+type IndexStatsStore struct {
+	Size                 string `json:"size,omitempty"` // human size, e.g. 119.3mb
+	SizeInBytes          int64  `json:"size_in_bytes,omitempty"`
+	ThrottleTime         string `json:"throttle_time,omitempty"` // human time, e.g. 0s
+	ThrottleTimeInMillis int64  `json:"throttle_time_in_millis,omitempty"`
+}
+
+// IndexStatsIndexing holds indexing (write) stats of an index.
+type IndexStatsIndexing struct {
+	IndexTotal           int64  `json:"index_total,omitempty"`
+	IndexTime            string `json:"index_time,omitempty"`
+	IndexTimeInMillis    int64  `json:"index_time_in_millis,omitempty"`
+	IndexCurrent         int64  `json:"index_current,omitempty"`
+	DeleteTotal          int64  `json:"delete_total,omitempty"`
+	DeleteTime           string `json:"delete_time,omitempty"`
+	DeleteTimeInMillis   int64  `json:"delete_time_in_millis,omitempty"`
+	DeleteCurrent        int64  `json:"delete_current,omitempty"`
+	NoopUpdateTotal      int64  `json:"noop_update_total,omitempty"`
+	IsThrottled          bool   `json:"is_throttled,omitempty"`
+	ThrottleTime         string `json:"throttle_time,omitempty"`
+	ThrottleTimeInMillis int64  `json:"throttle_time_in_millis,omitempty"`
+}
+
+// IndexStatsGet holds get-by-id stats of an index.
+type IndexStatsGet struct {
+	Total               int64  `json:"total,omitempty"`
+	GetTime             string `json:"get_time,omitempty"`
+	TimeInMillis        int64  `json:"time_in_millis,omitempty"`
+	ExistsTotal         int64  `json:"exists_total,omitempty"`
+	ExistsTime          string `json:"exists_time,omitempty"`
+	ExistsTimeInMillis  int64  `json:"exists_time_in_millis,omitempty"`
+	MissingTotal        int64  `json:"missing_total,omitempty"`
+	MissingTime         string `json:"missing_time,omitempty"`
+	MissingTimeInMillis int64  `json:"missing_time_in_millis,omitempty"`
+	Current             int64  `json:"current,omitempty"`
+}
+
+type IndexStatsSearch struct {
+	OpenContexts      int64  `json:"open_contexts,omitempty"`
+	QueryTotal        int64  `json:"query_total,omitempty"`
+	QueryTime         string `json:"query_time,omitempty"`
+	QueryTimeInMillis int64  `json:"query_time_in_millis,omitempty"`
+	QueryCurrent      int64  `json:"query_current,omitempty"`
+	FetchTotal        int64  `json:"fetch_total,omitempty"`
+	FetchTime         string `json:"fetch_time,omitempty"`
+	FetchTimeInMillis int64  `json:"fetch_time_in_millis,omitempty"`
+	FetchCurrent      int64  `json:"fetch_current,omitempty"`
+}
+
+// IndexStatsMerges holds segment merge statistics.
+type IndexStatsMerges struct {
+	Current            int64  `json:"current,omitempty"`
+	CurrentDocs        int64  `json:"current_docs,omitempty"`
+	CurrentSize        string `json:"current_size,omitempty"`
+	CurrentSizeInBytes int64  `json:"current_size_in_bytes,omitempty"`
+	Total              int64  `json:"total,omitempty"`
+	TotalTime          string `json:"total_time,omitempty"`
+	TotalTimeInMillis  int64  `json:"total_time_in_millis,omitempty"`
+	TotalDocs          int64  `json:"total_docs,omitempty"`
+	TotalSize          string `json:"total_size,omitempty"`
+	TotalSizeInBytes   int64  `json:"total_size_in_bytes,omitempty"`
+}
+
+// IndexStatsRefresh holds refresh statistics.
+type IndexStatsRefresh struct {
+	Total             int64  `json:"total,omitempty"`
+	TotalTime         string `json:"total_time,omitempty"`
+	TotalTimeInMillis int64  `json:"total_time_in_millis,omitempty"`
+}
+
+// IndexStatsFlush holds flush statistics.
+type IndexStatsFlush struct {
+	Total             int64  `json:"total,omitempty"`
+	TotalTime         string `json:"total_time,omitempty"`
+	TotalTimeInMillis int64  `json:"total_time_in_millis,omitempty"`
+}
+
+// IndexStatsWarmer holds index warmer statistics.
+type IndexStatsWarmer struct {
+	Current           int64  `json:"current,omitempty"`
+	Total             int64  `json:"total,omitempty"`
+	TotalTime         string `json:"total_time,omitempty"`
+	TotalTimeInMillis int64  `json:"total_time_in_millis,omitempty"`
+}
+
+// IndexStatsFilterCache holds filter cache memory and eviction stats.
+type IndexStatsFilterCache struct {
+	MemorySize        string `json:"memory_size,omitempty"`
+	MemorySizeInBytes int64  `json:"memory_size_in_bytes,omitempty"`
+	Evictions         int64  `json:"evictions,omitempty"`
+}
+
+// IndexStatsIdCache holds id cache memory stats.
+type IndexStatsIdCache struct {
+	MemorySize        string `json:"memory_size,omitempty"`
+	MemorySizeInBytes int64  `json:"memory_size_in_bytes,omitempty"`
+}
+
+// IndexStatsFielddata holds fielddata memory and eviction stats.
+type IndexStatsFielddata struct {
+	MemorySize        string `json:"memory_size,omitempty"`
+	MemorySizeInBytes int64  `json:"memory_size_in_bytes,omitempty"`
+	Evictions         int64  `json:"evictions,omitempty"`
+}
+
+// IndexStatsPercolate holds percolator statistics.
+type IndexStatsPercolate struct {
+	Total             int64  `json:"total,omitempty"`
+	GetTime           string `json:"get_time,omitempty"`
+	TimeInMillis      int64  `json:"time_in_millis,omitempty"`
+	Current           int64  `json:"current,omitempty"`
+	MemorySize        string `json:"memory_size,omitempty"`
+	MemorySizeInBytes int64  `json:"memory_size_in_bytes,omitempty"`
+	Queries           int64  `json:"queries,omitempty"`
+}
+
+// IndexStatsCompletion holds completion suggester memory stats.
+type IndexStatsCompletion struct {
+	Size        string `json:"size,omitempty"`
+	SizeInBytes int64  `json:"size_in_bytes,omitempty"`
+}
+
+// IndexStatsSegments holds statistics about the Lucene segments of an index.
+type IndexStatsSegments struct {
+	Count                       int64  `json:"count,omitempty"`
+	Memory                      string `json:"memory,omitempty"`
+	MemoryInBytes               int64  `json:"memory_in_bytes,omitempty"`
+	IndexWriterMemory           string `json:"index_writer_memory,omitempty"`
+	IndexWriterMemoryInBytes    int64  `json:"index_writer_memory_in_bytes,omitempty"`
+	IndexWriterMaxMemory        string `json:"index_writer_max_memory,omitempty"`
+	IndexWriterMaxMemoryInBytes int64  `json:"index_writer_max_memory_in_bytes,omitempty"`
+	VersionMapMemory            string `json:"version_map_memory,omitempty"`
+	VersionMapMemoryInBytes     int64  `json:"version_map_memory_in_bytes,omitempty"`
+	// NOTE(review): the human-readable tag here is "fixed_bit_set" while the
+	// bytes field below uses "fixed_bit_set_memory_in_bytes" — verify against
+	// an actual stats response; the human key may be "fixed_bit_set_memory".
+	FixedBitSetMemory           string `json:"fixed_bit_set,omitempty"`
+	FixedBitSetMemoryInBytes    int64  `json:"fixed_bit_set_memory_in_bytes,omitempty"`
+}
+
+// IndexStatsTranslog holds transaction log statistics.
+type IndexStatsTranslog struct {
+	Operations  int64  `json:"operations,omitempty"`
+	Size        string `json:"size,omitempty"`
+	SizeInBytes int64  `json:"size_in_bytes,omitempty"`
+}
+
+// IndexStatsSuggest holds suggest statistics.
+type IndexStatsSuggest struct {
+	Total        int64  `json:"total,omitempty"`
+	Time         string `json:"time,omitempty"`
+	TimeInMillis int64  `json:"time_in_millis,omitempty"`
+	Current      int64  `json:"current,omitempty"`
+}
+
+// IndexStatsQueryCache holds query cache memory, eviction, and hit/miss stats.
+type IndexStatsQueryCache struct {
+	MemorySize        string `json:"memory_size,omitempty"`
+	MemorySizeInBytes int64  `json:"memory_size_in_bytes,omitempty"`
+	Evictions         int64  `json:"evictions,omitempty"`
+	HitCount          int64  `json:"hit_count,omitempty"`
+	MissCount         int64  `json:"miss_count,omitempty"`
+}

+ 194 - 0
github.com/olivere/elastic/multi_get.go

@@ -0,0 +1,194 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+)
+
+// MultiGetService retrieves multiple documents in one roundtrip
+// via the Multi Get (mget) API.
+type MultiGetService struct {
+	client     *Client
+	preference string          // node/shard preference, sent as "preference"
+	realtime   *bool           // tri-state: nil means "use server default"
+	refresh    *bool           // tri-state: nil means "use server default"
+	items      []*MultiGetItem // documents to retrieve
+}
+
+// NewMultiGetService creates a new MultiGetService operating on the
+// given client, starting with an empty list of items to fetch.
+func NewMultiGetService(client *Client) *MultiGetService {
+	return &MultiGetService{
+		client: client,
+		items:  make([]*MultiGetItem, 0),
+	}
+}
+
+// Preference specifies the node or shard the mget operation should be
+// performed on (sent as the "preference" query parameter).
+func (b *MultiGetService) Preference(preference string) *MultiGetService {
+	b.preference = preference
+	return b
+}
+
+// Refresh, when set, controls whether the relevant shards are refreshed
+// before retrieving the documents.
+func (b *MultiGetService) Refresh(refresh bool) *MultiGetService {
+	b.refresh = &refresh
+	return b
+}
+
+// Realtime, when set, controls whether the mget is performed in realtime mode.
+func (b *MultiGetService) Realtime(realtime bool) *MultiGetService {
+	b.realtime = &realtime
+	return b
+}
+
+// Add appends one or more items to retrieve to the request.
+func (b *MultiGetService) Add(items ...*MultiGetItem) *MultiGetService {
+	b.items = append(b.items, items...)
+	return b
+}
+
+// Source builds the JSON body of the mget request: a "docs" array
+// holding the serialized form of every item added so far.
+func (b *MultiGetService) Source() interface{} {
+	docs := make([]interface{}, len(b.items))
+	for i := range b.items {
+		docs[i] = b.items[i].Source()
+	}
+	return map[string]interface{}{"docs": docs}
+}
+
+// Do executes the mget request against "/_mget" and returns the decoded
+// result, one GetResult per requested item (in request order, per the API).
+func (b *MultiGetService) Do() (*MultiGetResult, error) {
+	// Build url
+	path := "/_mget"
+
+	// Optional query parameters; nil pointers mean "server default".
+	params := make(url.Values)
+	if b.realtime != nil {
+		params.Add("realtime", fmt.Sprintf("%v", *b.realtime))
+	}
+	if b.preference != "" {
+		params.Add("preference", b.preference)
+	}
+	if b.refresh != nil {
+		params.Add("refresh", fmt.Sprintf("%v", *b.refresh))
+	}
+
+	// Set body
+	body := b.Source()
+
+	// Get response
+	res, err := b.client.PerformRequest("GET", path, params, body)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return result
+	ret := new(MultiGetResult)
+	if err := json.Unmarshal(res.Body, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// -- Multi Get Item --
+
+// MultiGetItem is a single document to retrieve via the MultiGetService.
+type MultiGetItem struct {
+	index       string
+	typ         string
+	id          string
+	routing     string
+	fields      []string // specific stored fields to return
+	version     *int64   // see org.elasticsearch.common.lucene.uid.Versions
+	versionType string   // see org.elasticsearch.index.VersionType
+	fsc         *FetchSourceContext
+}
+
+// NewMultiGetItem creates a new, empty MultiGetItem.
+func NewMultiGetItem() *MultiGetItem {
+	return new(MultiGetItem)
+}
+
+// Index sets the index of the document to get.
+func (item *MultiGetItem) Index(index string) *MultiGetItem {
+	item.index = index
+	return item
+}
+
+// Type sets the type of the document to get.
+func (item *MultiGetItem) Type(typ string) *MultiGetItem {
+	item.typ = typ
+	return item
+}
+
+// Id sets the id of the document to get.
+func (item *MultiGetItem) Id(id string) *MultiGetItem {
+	item.id = id
+	return item
+}
+
+// Routing sets a custom routing value for locating the document's shard.
+func (item *MultiGetItem) Routing(routing string) *MultiGetItem {
+	item.routing = routing
+	return item
+}
+
+// Fields appends the names of stored fields to return for this item.
+func (item *MultiGetItem) Fields(fields ...string) *MultiGetItem {
+	if item.fields == nil {
+		item.fields = make([]string, 0)
+	}
+	item.fields = append(item.fields, fields...)
+	return item
+}
+
+// Version can be MatchAny (-3), MatchAnyPre120 (0), NotFound (-1),
+// or NotSet (-2). These are specified in org.elasticsearch.common.lucene.uid.Versions.
+// The default in Elasticsearch is MatchAny (-3).
+func (item *MultiGetItem) Version(version int64) *MultiGetItem {
+	item.version = &version
+	return item
+}
+
+// VersionType can be "internal", "external", "external_gt", "external_gte",
+// or "force". See org.elasticsearch.index.VersionType in Elasticsearch source.
+// It is "internal" by default.
+func (item *MultiGetItem) VersionType(versionType string) *MultiGetItem {
+	item.versionType = versionType
+	return item
+}
+
+// FetchSource controls which parts of _source are returned for this item.
+func (item *MultiGetItem) FetchSource(fetchSourceContext *FetchSourceContext) *MultiGetItem {
+	item.fsc = fetchSourceContext
+	return item
+}
+
+// Source returns the serialized JSON to be sent to Elasticsearch as
+// one entry of the "docs" array in a Multi Get (mget) request.
+func (item *MultiGetItem) Source() interface{} {
+	source := make(map[string]interface{})
+
+	source["_id"] = item.id
+
+	if item.index != "" {
+		source["_index"] = item.index
+	}
+	if item.typ != "" {
+		source["_type"] = item.typ
+	}
+	if item.fsc != nil {
+		source["_source"] = item.fsc.Source()
+	}
+	if item.fields != nil {
+		// The mget API expects the stored-field list under the key
+		// "fields" (no underscore prefix); "_fields" is silently ignored
+		// by Elasticsearch, so the fields would never be returned.
+		source["fields"] = item.fields
+	}
+	if item.routing != "" {
+		source["_routing"] = item.routing
+	}
+	if item.version != nil {
+		source["version"] = fmt.Sprintf("%d", *item.version)
+	}
+	if item.versionType != "" {
+		source["version_type"] = item.versionType
+	}
+
+	return source
+}
+
+// -- Result of a Multi Get request.
+
+// MultiGetResult is the outcome of an mget call: one GetResult per item.
+type MultiGetResult struct {
+	Docs []*GetResult `json:"docs,omitempty"`
+}

+ 101 - 0
github.com/olivere/elastic/multi_search.go

@@ -0,0 +1,101 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+	"strings"
+)
+
+// MultiSearch executes one or more searches in one roundtrip.
+// See http://www.elasticsearch.org/guide/reference/api/multi-search/
+type MultiSearchService struct {
+	client     *Client
+	requests   []*SearchRequest
+	indices    []string // default indices for requests without their own
+	pretty     bool
+	routing    string // NOTE(review): no setter in this chunk and not sent in Do — confirm intended use
+	preference string // NOTE(review): no setter in this chunk and not sent in Do — confirm intended use
+}
+
+// NewMultiSearchService creates a new MultiSearchService for the given client.
+func NewMultiSearchService(client *Client) *MultiSearchService {
+	return &MultiSearchService{
+		client:   client,
+		requests: make([]*SearchRequest, 0),
+		indices:  make([]string, 0),
+	}
+}
+
+// Add appends one or more search requests to be executed in the msearch call.
+func (s *MultiSearchService) Add(requests ...*SearchRequest) *MultiSearchService {
+	s.requests = append(s.requests, requests...)
+	return s
+}
+
+// Index adds a default index used for requests that specify none themselves.
+func (s *MultiSearchService) Index(index string) *MultiSearchService {
+	s.indices = append(s.indices, index)
+	return s
+}
+
+// Indices adds default indices used for requests that specify none themselves.
+func (s *MultiSearchService) Indices(indices ...string) *MultiSearchService {
+	s.indices = append(s.indices, indices...)
+	return s
+}
+
+// Pretty indicates whether the response JSON should be indented.
+func (s *MultiSearchService) Pretty(pretty bool) *MultiSearchService {
+	s.pretty = pretty
+	return s
+}
+
+func (s *MultiSearchService) Do() (*MultiSearchResult, error) {
+	// Build url
+	path := "/_msearch"
+
+	// Parameters
+	params := make(url.Values)
+	if s.pretty {
+		params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+	}
+
+	// Set body
+	lines := make([]string, 0)
+	for _, sr := range s.requests {
+		// Set default indices if not specified in the request
+		if !sr.HasIndices() && len(s.indices) > 0 {
+			sr = sr.Indices(s.indices...)
+		}
+
+		header, err := json.Marshal(sr.header())
+		if err != nil {
+			return nil, err
+		}
+		body, err := json.Marshal(sr.body())
+		if err != nil {
+			return nil, err
+		}
+		lines = append(lines, string(header))
+		lines = append(lines, string(body))
+	}
+	body := strings.Join(lines, "\n") + "\n" // Don't forget trailing \n
+
+	// Get response
+	res, err := s.client.PerformRequest("GET", path, params, body)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return result
+	ret := new(MultiSearchResult)
+	if err := json.Unmarshal(res.Body, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// MultiSearchResult is the result of an msearch call: one entry in
+// Responses per search request, in the order they were added.
+type MultiSearchResult struct {
+	Responses []*SearchResult `json:"responses,omitempty"`
+}

+ 311 - 0
github.com/olivere/elastic/nodes_info.go

@@ -0,0 +1,311 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"log"
+	"net/url"
+	"strings"
+	"time"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
+// Blank references keep the imports above in use so the file compiles
+// even when individual helpers are added or removed during development.
+var (
+	_ = fmt.Print
+	_ = log.Print
+	_ = strings.Index
+	_ = uritemplates.Expand
+	_ = url.Parse
+)
+
+// NodesInfoService allows to retrieve one or more or all of the
+// cluster nodes information.
+// It is documented at http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/cluster-nodes-info.html.
+type NodesInfoService struct {
+	client       *Client
+	pretty       bool
+	nodeId       []string // node IDs/names to query; defaults to ["_all"]
+	metric       []string // metrics to return; defaults to ["_all"]
+	flatSettings *bool
+	human        *bool
+}
+
+// NewNodesInfoService creates a new NodesInfoService.
+// By default it queries all nodes ("_all") for all metrics ("_all").
+func NewNodesInfoService(client *Client) *NodesInfoService {
+	return &NodesInfoService{
+		client: client,
+		nodeId: []string{"_all"},
+		metric: []string{"_all"},
+	}
+}
+
+// NodeId is a list of node IDs or names to limit the returned information.
+// Use "_local" to return information from the node you're connecting to,
+// leave empty to get information from all nodes.
+// Calling it replaces any previously configured node list.
+func (s *NodesInfoService) NodeId(nodeId ...string) *NodesInfoService {
+	s.nodeId = append([]string{}, nodeId...)
+	return s
+}
+
+// Metric is a list of metrics you wish returned. Leave empty to return all.
+// Valid metrics are: settings, os, process, jvm, thread_pool, network,
+// transport, http, and plugins.
+// Calling it replaces any previously configured metric list.
+func (s *NodesInfoService) Metric(metric ...string) *NodesInfoService {
+	s.metric = append([]string{}, metric...)
+	return s
+}
+
+// FlatSettings returns settings in flat format (default: false).
+func (s *NodesInfoService) FlatSettings(flatSettings bool) *NodesInfoService {
+	s.flatSettings = &flatSettings
+	return s
+}
+
+// Human indicates whether to return time and byte values in human-readable format.
+func (s *NodesInfoService) Human(human bool) *NodesInfoService {
+	s.human = &human
+	return s
+}
+
+// Pretty indicates whether to indent the returned JSON.
+func (s *NodesInfoService) Pretty(pretty bool) *NodesInfoService {
+	s.pretty = pretty
+	return s
+}
+
+// buildURL builds the URL for the operation. Node IDs and metrics are
+// joined with commas and URL-escaped via the URI template expander.
+func (s *NodesInfoService) buildURL() (string, url.Values, error) {
+	// Build URL
+	path, err := uritemplates.Expand("/_nodes/{node_id}/{metric}", map[string]string{
+		"node_id": strings.Join(s.nodeId, ","),
+		"metric":  strings.Join(s.metric, ","),
+	})
+	if err != nil {
+		return "", url.Values{}, err
+	}
+
+	// Add query string parameters
+	params := url.Values{}
+	if s.flatSettings != nil {
+		params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
+	}
+	if s.human != nil {
+		params.Set("human", fmt.Sprintf("%v", *s.human))
+	}
+	if s.pretty {
+		params.Set("pretty", "1")
+	}
+	return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+// Nodes-info has no required parameters, so there is nothing to check.
+func (s *NodesInfoService) Validate() error {
+	return nil
+}
+
+// Do executes the operation: GET /_nodes/{node_id}/{metric}, decoding
+// the body into a NodesInfoResponse.
+func (s *NodesInfoService) Do() (*NodesInfoResponse, error) {
+	// Check pre-conditions
+	if err := s.Validate(); err != nil {
+		return nil, err
+	}
+
+	// Get URL for request
+	path, params, err := s.buildURL()
+	if err != nil {
+		return nil, err
+	}
+
+	// Get HTTP response
+	res, err := s.client.PerformRequest("GET", path, params, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return operation response
+	ret := new(NodesInfoResponse)
+	if err := json.Unmarshal(res.Body, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// NodesInfoResponse is the response of NodesInfoService.Do.
+// Nodes is keyed by the node ID as assigned by Elasticsearch.
+type NodesInfoResponse struct {
+	ClusterName string                    `json:"cluster_name"`
+	Nodes       map[string]*NodesInfoNode `json:"nodes"`
+}
+
+// NodesInfoNode holds the information reported for one cluster node.
+type NodesInfoNode struct {
+	// Name of the node, e.g. "Mister Fear"
+	Name string `json:"name"`
+	// TransportAddress, e.g. "inet[/127.0.0.1:9300]"
+	TransportAddress string `json:"transport_address"`
+	// Host is the host name, e.g. "macbookair"
+	Host string `json:"host"`
+	// IP is the IP address, e.g. "192.168.1.2"
+	IP string `json:"ip"`
+	// Version is the Elasticsearch version running on the node, e.g. "1.4.3"
+	Version string `json:"version"`
+	// Build is the Elasticsearch build, e.g. "36a29a7"
+	Build string `json:"build"`
+	// HTTPAddress, e.g. "inet[/127.0.0.1:9200]"
+	HTTPAddress string `json:"http_address"`
+	// HTTPSAddress, e.g. "inet[/127.0.0.1:9200]"
+	HTTPSAddress string `json:"https_address"`
+
+	// Settings of the node, e.g. paths and pidfile.
+	Settings map[string]interface{} `json:"settings"`
+
+	// OS information, e.g. CPU and memory.
+	OS *NodesInfoNodeOS `json:"os"`
+
+	// Process information, e.g. max file descriptors.
+	Process *NodesInfoNodeProcess `json:"process"`
+
+	// JVM information, e.g. VM version.
+	// Was *NodesInfoNodeProcess, which silently dropped every JVM field
+	// (pid, vm_name, mem, …); NodesInfoNodeJVM is the matching type.
+	JVM *NodesInfoNodeJVM `json:"jvm"`
+
+	// ThreadPool information.
+	ThreadPool *NodesInfoNodeThreadPool `json:"thread_pool"`
+
+	// Network information.
+	Network *NodesInfoNodeNetwork `json:"network"`
+
+	// Transport information.
+	Transport *NodesInfoNodeTransport `json:"transport"`
+
+	// HTTP information.
+	HTTP *NodesInfoNodeHTTP `json:"http"`
+
+	// Plugins information.
+	Plugins []*NodesInfoNodePlugin `json:"plugins"`
+}
+
+// NodesInfoNodeOS holds operating-system information of a node.
+type NodesInfoNodeOS struct {
+	RefreshInterval         string `json:"refresh_interval"`           // e.g. 1s
+	RefreshIntervalInMillis int    `json:"refresh_interval_in_millis"` // e.g. 1000
+	AvailableProcessors     int    `json:"available_processors"`       // e.g. 4
+
+	// CPU information
+	CPU struct {
+		Vendor           string `json:"vendor"`              // e.g. Intel
+		Model            string `json:"model"`               // e.g. iMac15,1
+		MHz              int    `json:"mhz"`                 // e.g. 3500
+		TotalCores       int    `json:"total_cores"`         // e.g. 4
+		TotalSockets     int    `json:"total_sockets"`       // e.g. 4
+		CoresPerSocket   int    `json:"cores_per_socket"`    // e.g. 16
+		CacheSizeInBytes int    `json:"cache_size_in_bytes"` // e.g. 256
+	} `json:"cpu"`
+
+	// Mem information
+	Mem struct {
+		Total        string `json:"total"`          // e.g. 16gb
+		TotalInBytes int    `json:"total_in_bytes"` // e.g. 17179869184
+	} `json:"mem"`
+
+	// Swap information
+	Swap struct {
+		Total        string `json:"total"`          // e.g. 1gb
+		TotalInBytes int    `json:"total_in_bytes"` // e.g. 1073741824
+	} `json:"swap"`
+}
+
+// NodesInfoNodeProcess holds process-level information of a node.
+type NodesInfoNodeProcess struct {
+	RefreshInterval         string `json:"refresh_interval"`           // e.g. 1s
+	RefreshIntervalInMillis int    `json:"refresh_interval_in_millis"` // e.g. 1000
+	ID                      int    `json:"id"`                         // process id, e.g. 87079
+	MaxFileDescriptors      int    `json:"max_file_descriptors"`       // e.g. 32768
+	Mlockall                bool   `json:"mlockall"`                   // e.g. false
+}
+
+// NodesInfoNodeJVM holds JVM information of a node.
+type NodesInfoNodeJVM struct {
+	PID               int       `json:"pid"`        // process id, e.g. 87079
+	Version           string    `json:"version"`    // e.g. "1.8.0_25"
+	VMName            string    `json:"vm_name"`    // e.g. "Java HotSpot(TM) 64-Bit Server VM"
+	VMVersion         string    `json:"vm_version"` // e.g. "25.25-b02"
+	VMVendor          string    `json:"vm_vendor"`  // e.g. "Oracle Corporation"
+	StartTime         time.Time `json:"start_time"` // e.g. "2015-01-03T15:18:30.982Z"
+	StartTimeInMillis int64     `json:"start_time_in_millis"`
+
+	// Mem information
+	Mem struct {
+		HeapInit           string `json:"heap_init"` // e.g. 1gb
+		HeapInitInBytes    int    `json:"heap_init_in_bytes"`
+		HeapMax            string `json:"heap_max"` // e.g. 4gb
+		HeapMaxInBytes     int    `json:"heap_max_in_bytes"`
+		NonHeapInit        string `json:"non_heap_init"` // e.g. 2.4mb
+		NonHeapInitInBytes int    `json:"non_heap_init_in_bytes"`
+		NonHeapMax         string `json:"non_heap_max"` // e.g. 0b
+		NonHeapMaxInBytes  int    `json:"non_heap_max_in_bytes"`
+		DirectMax          string `json:"direct_max"` // e.g. 4gb
+		DirectMaxInBytes   int    `json:"direct_max_in_bytes"`
+	} `json:"mem"`
+
+	GCCollectors []string `json:"gc_collectors"` // e.g. ["ParNew"]
+	MemoryPools  []string `json:"memory_pools"`  // e.g. ["Code Cache", "Metaspace"]
+}
+
+// NodesInfoNodeThreadPool holds the per-pool thread pool configuration
+// of a node; pools not present in the response are left nil.
+type NodesInfoNodeThreadPool struct {
+	Percolate  *NodesInfoNodeThreadPoolSection `json:"percolate"`
+	Bench      *NodesInfoNodeThreadPoolSection `json:"bench"`
+	Listener   *NodesInfoNodeThreadPoolSection `json:"listener"`
+	Index      *NodesInfoNodeThreadPoolSection `json:"index"`
+	Refresh    *NodesInfoNodeThreadPoolSection `json:"refresh"`
+	Suggest    *NodesInfoNodeThreadPoolSection `json:"suggest"`
+	Generic    *NodesInfoNodeThreadPoolSection `json:"generic"`
+	Warmer     *NodesInfoNodeThreadPoolSection `json:"warmer"`
+	Search     *NodesInfoNodeThreadPoolSection `json:"search"`
+	Flush      *NodesInfoNodeThreadPoolSection `json:"flush"`
+	Optimize   *NodesInfoNodeThreadPoolSection `json:"optimize"`
+	Management *NodesInfoNodeThreadPoolSection `json:"management"`
+	Get        *NodesInfoNodeThreadPoolSection `json:"get"`
+	Merge      *NodesInfoNodeThreadPoolSection `json:"merge"`
+	Bulk       *NodesInfoNodeThreadPoolSection `json:"bulk"`
+	Snapshot   *NodesInfoNodeThreadPoolSection `json:"snapshot"`
+}
+
+// NodesInfoNodeThreadPoolSection describes one thread pool.
+// QueueSize is interface{} because Elasticsearch may return either a
+// human string (e.g. "1k") or a number (e.g. -1 for unbounded).
+type NodesInfoNodeThreadPoolSection struct {
+	Type      string      `json:"type"`       // e.g. fixed
+	Min       int         `json:"min"`        // e.g. 4
+	Max       int         `json:"max"`        // e.g. 4
+	KeepAlive string      `json:"keep_alive"` // e.g. "5m"
+	QueueSize interface{} `json:"queue_size"` // e.g. "1k" or -1
+}
+
+// NodesInfoNodeNetwork holds network information of a node.
+type NodesInfoNodeNetwork struct {
+	RefreshInterval         string `json:"refresh_interval"`           // e.g. 1s
+	RefreshIntervalInMillis int    `json:"refresh_interval_in_millis"` // e.g. 1000
+	PrimaryInterface        struct {
+		Address    string `json:"address"`     // e.g. 192.168.1.2
+		Name       string `json:"name"`        // e.g. en0
+		MACAddress string `json:"mac_address"` // e.g. 11:22:33:44:55:66
+	} `json:"primary_interface"`
+}
+
+// NodesInfoNodeTransport holds transport-layer addresses of a node.
+type NodesInfoNodeTransport struct {
+	BoundAddress   string `json:"bound_address"`   // e.g. inet[/127.0.0.1:9300]
+	PublishAddress string `json:"publish_address"` // e.g. inet[/127.0.0.1:9300]
+}
+
+// NodesInfoNodeHTTP holds HTTP-layer addresses and limits of a node.
+type NodesInfoNodeHTTP struct {
+	BoundAddress            string `json:"bound_address"`      // e.g. inet[/127.0.0.1:9300]
+	PublishAddress          string `json:"publish_address"`    // e.g. inet[/127.0.0.1:9300]
+	MaxContentLength        string `json:"max_content_length"` // e.g. "100mb"
+	MaxContentLengthInBytes int64  `json:"max_content_length_in_bytes"`
+}
+
+// NodesInfoNodePlugin describes one plugin installed on a node.
+type NodesInfoNodePlugin struct {
+	Name        string `json:"name"`
+	Description string `json:"description"`
+	Site        bool   `json:"site"`
+	JVM         bool   `json:"jvm"`
+	URL         string `json:"url"` // e.g. /_plugin/dummy/
+}

+ 135 - 0
github.com/olivere/elastic/optimize.go

@@ -0,0 +1,135 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+	"strings"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
+// OptimizeService forces the merge of one or more indices via the
+// _optimize endpoint. Nil pointer fields are omitted from the request
+// so the server defaults apply.
+type OptimizeService struct {
+	client             *Client
+	indices            []string
+	maxNumSegments     *int
+	onlyExpungeDeletes *bool
+	flush              *bool
+	waitForMerge       *bool
+	force              *bool
+	pretty             bool
+}
+
+// NewOptimizeService creates a new OptimizeService for the given client.
+func NewOptimizeService(client *Client) *OptimizeService {
+	return &OptimizeService{
+		client:  client,
+		indices: make([]string, 0),
+	}
+}
+
+// Index adds a single index to optimize.
+func (s *OptimizeService) Index(index string) *OptimizeService {
+	s.indices = append(s.indices, index)
+	return s
+}
+
+// Indices adds one or more indices to optimize.
+func (s *OptimizeService) Indices(indices ...string) *OptimizeService {
+	s.indices = append(s.indices, indices...)
+	return s
+}
+
+// MaxNumSegments sets the number of segments to merge down to.
+func (s *OptimizeService) MaxNumSegments(maxNumSegments int) *OptimizeService {
+	s.maxNumSegments = &maxNumSegments
+	return s
+}
+
+// OnlyExpungeDeletes restricts the merge to segments with deleted documents.
+func (s *OptimizeService) OnlyExpungeDeletes(onlyExpungeDeletes bool) *OptimizeService {
+	s.onlyExpungeDeletes = &onlyExpungeDeletes
+	return s
+}
+
+// Flush controls whether a flush is performed after the optimize.
+func (s *OptimizeService) Flush(flush bool) *OptimizeService {
+	s.flush = &flush
+	return s
+}
+
+// WaitForMerge controls whether the call blocks until the merge completes.
+func (s *OptimizeService) WaitForMerge(waitForMerge bool) *OptimizeService {
+	s.waitForMerge = &waitForMerge
+	return s
+}
+
+// Force controls whether the optimize runs even on a single segment.
+func (s *OptimizeService) Force(force bool) *OptimizeService {
+	s.force = &force
+	return s
+}
+
+// Pretty indicates whether the response JSON should be indented.
+func (s *OptimizeService) Pretty(pretty bool) *OptimizeService {
+	s.pretty = pretty
+	return s
+}
+
+// Do executes the optimize request: POST /{indices}/_optimize (or
+// /_optimize over all indices when none were added) and decodes the result.
+func (s *OptimizeService) Do() (*OptimizeResult, error) {
+	// Build url
+	path := "/"
+
+	// Indices part: each name is URL-escaped individually before joining.
+	indexPart := make([]string, 0)
+	for _, index := range s.indices {
+		index, err := uritemplates.Expand("{index}", map[string]string{
+			"index": index,
+		})
+		if err != nil {
+			return nil, err
+		}
+		indexPart = append(indexPart, index)
+	}
+	if len(indexPart) > 0 {
+		path += strings.Join(indexPart, ",")
+	}
+
+	path += "/_optimize"
+
+	// Parameters: only explicitly-set options are sent.
+	params := make(url.Values)
+	if s.maxNumSegments != nil {
+		params.Set("max_num_segments", fmt.Sprintf("%d", *s.maxNumSegments))
+	}
+	if s.onlyExpungeDeletes != nil {
+		params.Set("only_expunge_deletes", fmt.Sprintf("%v", *s.onlyExpungeDeletes))
+	}
+	if s.flush != nil {
+		params.Set("flush", fmt.Sprintf("%v", *s.flush))
+	}
+	if s.waitForMerge != nil {
+		params.Set("wait_for_merge", fmt.Sprintf("%v", *s.waitForMerge))
+	}
+	if s.force != nil {
+		params.Set("force", fmt.Sprintf("%v", *s.force))
+	}
+	if s.pretty {
+		params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+	}
+
+	// Get response
+	res, err := s.client.PerformRequest("POST", path, params, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return result
+	ret := new(OptimizeResult)
+	if err := json.Unmarshal(res.Body, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// -- Result of an optimize request.
+
+// OptimizeResult reports which shards the optimize touched.
+type OptimizeResult struct {
+	Shards shardsInfo `json:"_shards,omitempty"`
+}

+ 301 - 0
github.com/olivere/elastic/percolate.go

@@ -0,0 +1,301 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+	"strings"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
// PercolateService is documented at http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/search-percolate.html.
type PercolateService struct {
	client              *Client
	pretty              bool
	index               string // index of the document being percolated
	typ                 string // type of the document being percolated
	id                  string // optional id of an existing document to percolate
	version             interface{}
	versionType         string
	routing             []string
	preference          string
	ignoreUnavailable   *bool
	percolateIndex      string
	percolatePreference string
	percolateRouting    string
	source              string
	allowNoIndices      *bool
	expandWildcards     string
	percolateFormat     string
	percolateType       string
	bodyJson            interface{} // request body as JSON-serializable value; takes precedence over bodyString in Do
	bodyString          string      // request body as raw string
}
+
+// NewPercolateService creates a new PercolateService.
+func NewPercolateService(client *Client) *PercolateService {
+	return &PercolateService{
+		client:  client,
+		routing: make([]string, 0),
+	}
+}
+
// Index is the name of the index of the document being percolated.
func (s *PercolateService) Index(index string) *PercolateService {
	s.index = index
	return s
}

// Type is the type of the document being percolated.
func (s *PercolateService) Type(typ string) *PercolateService {
	s.typ = typ
	return s
}

// Id is used to substitute the document in the request body with a
// document that is known by the specified id. On top of the id,
// the index and type parameter will be used to retrieve
// the document from within the cluster.
func (s *PercolateService) Id(id string) *PercolateService {
	s.id = id
	return s
}

// ExpandWildcards indicates whether to expand wildcard expressions
// to concrete indices that are open, closed or both.
func (s *PercolateService) ExpandWildcards(expandWildcards string) *PercolateService {
	s.expandWildcards = expandWildcards
	return s
}

// PercolateFormat indicates whether to return an array of matching
// query IDs instead of objects.
func (s *PercolateService) PercolateFormat(percolateFormat string) *PercolateService {
	s.percolateFormat = percolateFormat
	return s
}

// PercolateType is the type to percolate the document into. Defaults to type.
func (s *PercolateService) PercolateType(percolateType string) *PercolateService {
	s.percolateType = percolateType
	return s
}

// PercolateRouting is the routing value to use when percolating
// the existing document.
func (s *PercolateService) PercolateRouting(percolateRouting string) *PercolateService {
	s.percolateRouting = percolateRouting
	return s
}

// Source is the URL-encoded request definition.
func (s *PercolateService) Source(source string) *PercolateService {
	s.source = source
	return s
}

// AllowNoIndices indicates whether to ignore if a wildcard indices
// expression resolves into no concrete indices.
// (This includes `_all` string or when no indices have been specified).
func (s *PercolateService) AllowNoIndices(allowNoIndices bool) *PercolateService {
	s.allowNoIndices = &allowNoIndices
	return s
}

// IgnoreUnavailable indicates whether specified concrete indices should
// be ignored when unavailable (missing or closed).
func (s *PercolateService) IgnoreUnavailable(ignoreUnavailable bool) *PercolateService {
	s.ignoreUnavailable = &ignoreUnavailable
	return s
}

// PercolateIndex is the index to percolate the document into. Defaults to index.
func (s *PercolateService) PercolateIndex(percolateIndex string) *PercolateService {
	s.percolateIndex = percolateIndex
	return s
}

// PercolatePreference defines which shard to prefer when executing
// the percolate request.
func (s *PercolateService) PercolatePreference(percolatePreference string) *PercolateService {
	s.percolatePreference = percolatePreference
	return s
}

// Version is an explicit version number for concurrency control.
func (s *PercolateService) Version(version interface{}) *PercolateService {
	s.version = version
	return s
}

// VersionType is the specific version type.
func (s *PercolateService) VersionType(versionType string) *PercolateService {
	s.versionType = versionType
	return s
}

// Routing is a list of specific routing values.
func (s *PercolateService) Routing(routing []string) *PercolateService {
	s.routing = routing
	return s
}

// Preference specifies the node or shard the operation should be
// performed on (default: random).
func (s *PercolateService) Preference(preference string) *PercolateService {
	s.preference = preference
	return s
}

// Pretty indicates that the JSON response be indented and human readable.
func (s *PercolateService) Pretty(pretty bool) *PercolateService {
	s.pretty = pretty
	return s
}

// Doc wraps the given document into the "doc" key of the body.
func (s *PercolateService) Doc(doc interface{}) *PercolateService {
	return s.BodyJson(map[string]interface{}{"doc": doc})
}

// BodyJson is the percolator request definition using the percolate DSL.
func (s *PercolateService) BodyJson(body interface{}) *PercolateService {
	s.bodyJson = body
	return s
}

// BodyString is the percolator request definition using the percolate DSL.
func (s *PercolateService) BodyString(body string) *PercolateService {
	s.bodyString = body
	return s
}
+
+// buildURL builds the URL for the operation.
+func (s *PercolateService) buildURL() (string, url.Values, error) {
+	// Build URL
+	path, err := uritemplates.Expand("/{index}/{type}/_percolate", map[string]string{
+		"index": s.index,
+		"type":  s.typ,
+		"id":    s.id,
+	})
+	if err != nil {
+		return "", url.Values{}, err
+	}
+
+	// Add query string parameters
+	params := url.Values{}
+	if s.pretty {
+		params.Set("pretty", "1")
+	}
+	if s.version != nil {
+		params.Set("version", fmt.Sprintf("%v", s.version))
+	}
+	if s.versionType != "" {
+		params.Set("version_type", s.versionType)
+	}
+	if len(s.routing) > 0 {
+		params.Set("routing", strings.Join(s.routing, ","))
+	}
+	if s.preference != "" {
+		params.Set("preference", s.preference)
+	}
+	if s.ignoreUnavailable != nil {
+		params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+	}
+	if s.percolateIndex != "" {
+		params.Set("percolate_index", s.percolateIndex)
+	}
+	if s.percolatePreference != "" {
+		params.Set("percolate_preference", s.percolatePreference)
+	}
+	if s.percolateRouting != "" {
+		params.Set("percolate_routing", s.percolateRouting)
+	}
+	if s.source != "" {
+		params.Set("source", s.source)
+	}
+	if s.allowNoIndices != nil {
+		params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+	}
+	if s.expandWildcards != "" {
+		params.Set("expand_wildcards", s.expandWildcards)
+	}
+	if s.percolateFormat != "" {
+		params.Set("percolate_format", s.percolateFormat)
+	}
+	if s.percolateType != "" {
+		params.Set("percolate_type", s.percolateType)
+	}
+	return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *PercolateService) Validate() error {
+	var invalid []string
+	if s.index == "" {
+		invalid = append(invalid, "Index")
+	}
+	if s.typ == "" {
+		invalid = append(invalid, "Type")
+	}
+	if len(invalid) > 0 {
+		return fmt.Errorf("missing required fields: %v", invalid)
+	}
+	return nil
+}
+
+// Do executes the operation.
+func (s *PercolateService) Do() (*PercolateResponse, error) {
+	// Check pre-conditions
+	if err := s.Validate(); err != nil {
+		return nil, err
+	}
+
+	// Get URL for request
+	path, params, err := s.buildURL()
+	if err != nil {
+		return nil, err
+	}
+
+	// Setup HTTP request body
+	var body interface{}
+	if s.bodyJson != nil {
+		body = s.bodyJson
+	} else {
+		body = s.bodyString
+	}
+
+	// Get HTTP response
+	res, err := s.client.PerformRequest("GET", path, params, body)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return operation response
+	ret := new(PercolateResponse)
+	if err := json.Unmarshal(res.Body, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
// PercolateResponse is the response of PercolateService.Do.
type PercolateResponse struct {
	TookInMillis int64             `json:"took"`  // search time in milliseconds
	Total        int64             `json:"total"` // total matches
	Matches      []*PercolateMatch `json:"matches,omitempty"`
	Facets       SearchFacets      `json:"facets,omitempty"`       // results from facets
	Aggregations Aggregations      `json:"aggregations,omitempty"` // results from aggregations
}

// PercolateMatch returns a single match in a PercolateResponse.
type PercolateMatch struct {
	Index string  `json:"_index,omitempty"`
	Id    string  `json:"_id"`
	Score float64 `json:"_score,omitempty"`
}

+ 117 - 0
github.com/olivere/elastic/ping.go

@@ -0,0 +1,117 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"net/http"
+	"net/url"
+)
+
// PingService checks if an Elasticsearch server on a given URL is alive.
// When asked for, it can also return various information about the
// Elasticsearch server, e.g. the Elasticsearch version number.
//
// Ping simply starts a HTTP GET request to the URL of the server.
// If the server responds with HTTP Status code 200 OK, the server is alive.
type PingService struct {
	client       *Client
	url          string // base URL of the server; DefaultURL unless overridden
	timeout      string // optional "timeout" URL parameter
	httpHeadOnly bool   // issue a HEAD request and skip decoding the body
	pretty       bool   // request pretty-printed JSON
}

// PingResult is the result returned from querying the Elasticsearch server.
type PingResult struct {
	Status      int    `json:"status"`
	Name        string `json:"name"`
	ClusterName string `json:"cluster_name"`
	Version     struct {
		Number         string `json:"number"`
		BuildHash      string `json:"build_hash"`
		BuildTimestamp string `json:"build_timestamp"`
		BuildSnapshot  bool   `json:"build_snapshot"`
		LuceneVersion  string `json:"lucene_version"`
	} `json:"version"`
	TagLine string `json:"tagline"`
}
+
+func NewPingService(client *Client) *PingService {
+	return &PingService{
+		client:       client,
+		url:          DefaultURL,
+		httpHeadOnly: false,
+		pretty:       false,
+	}
+}
+
// URL sets the URL of the Elasticsearch server to ping.
func (s *PingService) URL(url string) *PingService {
	s.url = url
	return s
}

// Timeout sets the "timeout" URL parameter of the ping request.
func (s *PingService) Timeout(timeout string) *PingService {
	s.timeout = timeout
	return s
}

// HttpHeadOnly makes the service to only return the status code in Do;
// the PingResult will be nil.
func (s *PingService) HttpHeadOnly(httpHeadOnly bool) *PingService {
	s.httpHeadOnly = httpHeadOnly
	return s
}

// Pretty indicates that the JSON response be indented and human readable.
func (s *PingService) Pretty(pretty bool) *PingService {
	s.pretty = pretty
	return s
}
+
+// Do returns the PingResult, the HTTP status code of the Elasticsearch
+// server, and an error.
+func (s *PingService) Do() (*PingResult, int, error) {
+	url_ := s.url + "/"
+
+	params := make(url.Values)
+	if s.timeout != "" {
+		params.Set("timeout", s.timeout)
+	}
+	if s.pretty {
+		params.Set("pretty", "1")
+	}
+	if len(params) > 0 {
+		url_ += "?" + params.Encode()
+	}
+
+	var method string
+	if s.httpHeadOnly {
+		method = "HEAD"
+	} else {
+		method = "GET"
+	}
+
+	// Notice: This service must NOT use PerformRequest!
+	req, err := NewRequest(method, url_)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	res, err := s.client.c.Do((*http.Request)(req))
+	if err != nil {
+		return nil, 0, err
+	}
+	defer res.Body.Close()
+
+	var ret *PingResult
+	if !s.httpHeadOnly {
+		ret = new(PingResult)
+		if err := json.NewDecoder(res.Body).Decode(ret); err != nil {
+			return nil, res.StatusCode, err
+		}
+	}
+
+	return ret, res.StatusCode, nil
+}

+ 222 - 0
github.com/olivere/elastic/put_mapping.go

@@ -0,0 +1,222 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"log"
+	"net/url"
+	"strings"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
// NOTE(review): these blank-identifier references keep the imports above
// "used" even if the code below changes; presumably a code-generation
// convention — confirm before removing (the log import in particular is
// otherwise unused in this file).
var (
	_ = fmt.Print
	_ = log.Print
	_ = strings.Index
	_ = uritemplates.Expand
	_ = url.Parse
)
+
// PutMappingService allows to register specific mapping definition
// for a specific type.
// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-put-mapping.html.
type PutMappingService struct {
	client            *Client
	pretty            bool
	typ               string   // document type (required)
	index             []string // target indices; empty means all indices
	masterTimeout     string
	ignoreUnavailable *bool
	allowNoIndices    *bool
	expandWildcards   string
	ignoreConflicts   *bool
	timeout           string
	bodyJson          map[string]interface{} // mapping definition; takes precedence over bodyString in Do
	bodyString        string                 // mapping definition as raw string
}
+
+// NewPutMappingService creates a new PutMappingService.
+func NewPutMappingService(client *Client) *PutMappingService {
+	return &PutMappingService{
+		client: client,
+		index:  make([]string, 0),
+	}
+}
+
// Index is a list of index names the mapping should be added to
// (supports wildcards); use `_all` or omit to add the mapping on all indices.
func (s *PutMappingService) Index(index ...string) *PutMappingService {
	s.index = append(s.index, index...)
	return s
}

// Type is the name of the document type.
func (s *PutMappingService) Type(typ string) *PutMappingService {
	s.typ = typ
	return s
}

// Timeout is an explicit operation timeout.
func (s *PutMappingService) Timeout(timeout string) *PutMappingService {
	s.timeout = timeout
	return s
}

// MasterTimeout specifies the timeout for connection to master.
func (s *PutMappingService) MasterTimeout(masterTimeout string) *PutMappingService {
	s.masterTimeout = masterTimeout
	return s
}

// IgnoreUnavailable indicates whether specified concrete indices should be
// ignored when unavailable (missing or closed).
func (s *PutMappingService) IgnoreUnavailable(ignoreUnavailable bool) *PutMappingService {
	s.ignoreUnavailable = &ignoreUnavailable
	return s
}

// AllowNoIndices indicates whether to ignore if a wildcard indices
// expression resolves into no concrete indices.
// This includes `_all` string or when no indices have been specified.
func (s *PutMappingService) AllowNoIndices(allowNoIndices bool) *PutMappingService {
	s.allowNoIndices = &allowNoIndices
	return s
}

// ExpandWildcards indicates whether to expand wildcard expression to
// concrete indices that are open, closed or both.
func (s *PutMappingService) ExpandWildcards(expandWildcards string) *PutMappingService {
	s.expandWildcards = expandWildcards
	return s
}

// IgnoreConflicts specifies whether to ignore conflicts while updating
// the mapping (default: false).
func (s *PutMappingService) IgnoreConflicts(ignoreConflicts bool) *PutMappingService {
	s.ignoreConflicts = &ignoreConflicts
	return s
}

// Pretty indicates that the JSON response be indented and human readable.
func (s *PutMappingService) Pretty(pretty bool) *PutMappingService {
	s.pretty = pretty
	return s
}

// BodyJson contains the mapping definition.
func (s *PutMappingService) BodyJson(mapping map[string]interface{}) *PutMappingService {
	s.bodyJson = mapping
	return s
}

// BodyString is the mapping definition serialized as a string.
func (s *PutMappingService) BodyString(mapping string) *PutMappingService {
	s.bodyString = mapping
	return s
}
+
+// buildURL builds the URL for the operation.
+func (s *PutMappingService) buildURL() (string, url.Values, error) {
+	var err error
+	var path string
+
+	// Build URL: Typ MUST be specified and is verified in Validate.
+	if len(s.index) > 0 {
+		path, err = uritemplates.Expand("/{index}/_mapping/{type}", map[string]string{
+			"index": strings.Join(s.index, ","),
+			"type":  s.typ,
+		})
+	} else {
+		path, err = uritemplates.Expand("/_mapping/{type}", map[string]string{
+			"type": s.typ,
+		})
+	}
+	if err != nil {
+		return "", url.Values{}, err
+	}
+
+	// Add query string parameters
+	params := url.Values{}
+	if s.pretty {
+		params.Set("pretty", "1")
+	}
+	if s.ignoreUnavailable != nil {
+		params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+	}
+	if s.allowNoIndices != nil {
+		params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+	}
+	if s.expandWildcards != "" {
+		params.Set("expand_wildcards", s.expandWildcards)
+	}
+	if s.ignoreConflicts != nil {
+		params.Set("ignore_conflicts", fmt.Sprintf("%v", *s.ignoreConflicts))
+	}
+	if s.timeout != "" {
+		params.Set("timeout", s.timeout)
+	}
+	if s.masterTimeout != "" {
+		params.Set("master_timeout", s.masterTimeout)
+	}
+	return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *PutMappingService) Validate() error {
+	var invalid []string
+	if s.typ == "" {
+		invalid = append(invalid, "Type")
+	}
+	if s.bodyString == "" && s.bodyJson == nil {
+		invalid = append(invalid, "BodyJson")
+	}
+	if len(invalid) > 0 {
+		return fmt.Errorf("missing required fields: %v", invalid)
+	}
+	return nil
+}
+
+// Do executes the operation.
+func (s *PutMappingService) Do() (*PutMappingResponse, error) {
+	// Check pre-conditions
+	if err := s.Validate(); err != nil {
+		return nil, err
+	}
+
+	// Get URL for request
+	path, params, err := s.buildURL()
+	if err != nil {
+		return nil, err
+	}
+
+	// Setup HTTP request body
+	var body interface{}
+	if s.bodyJson != nil {
+		body = s.bodyJson
+	} else {
+		body = s.bodyString
+	}
+
+	// Get HTTP response
+	res, err := s.client.PerformRequest("PUT", path, params, body)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return operation response
+	ret := new(PutMappingResponse)
+	if err := json.Unmarshal(res.Body, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
// PutMappingResponse is the response of PutMappingService.Do.
type PutMappingResponse struct {
	Acknowledged bool `json:"acknowledged"` // true when the cluster accepted the mapping
}

+ 152 - 0
github.com/olivere/elastic/put_template.go

@@ -0,0 +1,152 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
// PutTemplateService creates or updates a search template.
// The documentation can be found at
// http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/search-template.html.
type PutTemplateService struct {
	client      *Client
	pretty      bool
	id          string // template ID (required)
	opType      string
	version     *int
	versionType string
	bodyJson    interface{} // template body as JSON-serializable value; takes precedence over bodyString in Do
	bodyString  string      // template body as raw string
}
+
+// NewPutTemplateService creates a new PutTemplateService.
+func NewPutTemplateService(client *Client) *PutTemplateService {
+	return &PutTemplateService{
+		client: client,
+	}
+}
+
// Id is the template ID.
func (s *PutTemplateService) Id(id string) *PutTemplateService {
	s.id = id
	return s
}

// OpType is an explicit operation type.
func (s *PutTemplateService) OpType(opType string) *PutTemplateService {
	s.opType = opType
	return s
}

// Version is an explicit version number for concurrency control.
func (s *PutTemplateService) Version(version int) *PutTemplateService {
	s.version = &version
	return s
}

// VersionType is a specific version type.
func (s *PutTemplateService) VersionType(versionType string) *PutTemplateService {
	s.versionType = versionType
	return s
}

// BodyJson is the document as a JSON serializable object.
func (s *PutTemplateService) BodyJson(body interface{}) *PutTemplateService {
	s.bodyJson = body
	return s
}

// BodyString is the document as a string.
func (s *PutTemplateService) BodyString(body string) *PutTemplateService {
	s.bodyString = body
	return s
}
+
+// buildURL builds the URL for the operation.
+func (s *PutTemplateService) buildURL() (string, url.Values, error) {
+	// Build URL
+	path, err := uritemplates.Expand("/_search/template/{id}", map[string]string{
+		"id": s.id,
+	})
+	if err != nil {
+		return "", url.Values{}, err
+	}
+
+	// Add query string parameters
+	params := url.Values{}
+	if s.version != nil {
+		params.Set("version", fmt.Sprintf("%d", *s.version))
+	}
+	if s.versionType != "" {
+		params.Set("version_type", s.versionType)
+	}
+	if s.opType != "" {
+		params.Set("op_type", s.opType)
+	}
+
+	return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *PutTemplateService) Validate() error {
+	var invalid []string
+	if s.id == "" {
+		invalid = append(invalid, "Id")
+	}
+	if s.bodyString == "" && s.bodyJson == nil {
+		invalid = append(invalid, "BodyJson")
+	}
+	if len(invalid) > 0 {
+		return fmt.Errorf("missing required fields: %v", invalid)
+	}
+	return nil
+}
+
+// Do executes the operation.
+func (s *PutTemplateService) Do() (*PutTemplateResponse, error) {
+	// Check pre-conditions
+	if err := s.Validate(); err != nil {
+		return nil, err
+	}
+
+	// Get URL for request
+	path, params, err := s.buildURL()
+	if err != nil {
+		return nil, err
+	}
+
+	// Setup HTTP request body
+	var body interface{}
+	if s.bodyJson != nil {
+		body = s.bodyJson
+	} else {
+		body = s.bodyString
+	}
+
+	// Get HTTP response
+	res, err := s.client.PerformRequest("PUT", path, params, body)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return operation response
+	ret := new(PutTemplateResponse)
+	if err := json.Unmarshal(res.Body, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
// PutTemplateResponse is the response of PutTemplateService.Do.
type PutTemplateResponse struct {
	Id      string `json:"_id"`
	Version int    `json:"_version"`
	Created bool   `json:"created"`
}

+ 14 - 0
github.com/olivere/elastic/query.go

@@ -0,0 +1,14 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
// Query represents the generic query interface.
// A query's only purpose is to return the
// source of the query as a JSON-serializable
// object. Returning a map[string]interface{}
// will do.
type Query interface {
	Source() interface{}
}

+ 99 - 0
github.com/olivere/elastic/refresh.go

@@ -0,0 +1,99 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+	"strings"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
// RefreshService explicitly refreshes one or more indices.
type RefreshService struct {
	client  *Client
	indices []string // indices to refresh; empty list omits the index part of the URL
	force   *bool    // optional "force" URL parameter
	pretty  bool     // request pretty-printed JSON
}
+
+func NewRefreshService(client *Client) *RefreshService {
+	builder := &RefreshService{
+		client:  client,
+		indices: make([]string, 0),
+	}
+	return builder
+}
+
// Index adds a single index to the list of indices to refresh.
func (s *RefreshService) Index(index string) *RefreshService {
	s.indices = append(s.indices, index)
	return s
}

// Indices adds a list of indices to refresh.
func (s *RefreshService) Indices(indices ...string) *RefreshService {
	s.indices = append(s.indices, indices...)
	return s
}

// Force sets the "force" URL parameter of the refresh request.
func (s *RefreshService) Force(force bool) *RefreshService {
	s.force = &force
	return s
}

// Pretty indicates that the JSON response be indented and human readable.
func (s *RefreshService) Pretty(pretty bool) *RefreshService {
	s.pretty = pretty
	return s
}
+
+func (s *RefreshService) Do() (*RefreshResult, error) {
+	// Build url
+	path := "/"
+
+	// Indices part
+	indexPart := make([]string, 0)
+	for _, index := range s.indices {
+		index, err := uritemplates.Expand("{index}", map[string]string{
+			"index": index,
+		})
+		if err != nil {
+			return nil, err
+		}
+		indexPart = append(indexPart, index)
+	}
+	if len(indexPart) > 0 {
+		path += strings.Join(indexPart, ",")
+	}
+
+	path += "/_refresh"
+
+	// Parameters
+	params := make(url.Values)
+	if s.force != nil {
+		params.Set("force", fmt.Sprintf("%v", *s.force))
+	}
+	if s.pretty {
+		params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+	}
+
+	// Get response
+	res, err := s.client.PerformRequest("POST", path, params, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return result
+	ret := new(RefreshResult)
+	if err := json.Unmarshal(res.Body, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
// -- Result of a refresh request.

// RefreshResult is the JSON response of a refresh request;
// it only carries the shard summary returned by Elasticsearch.
type RefreshResult struct {
	Shards shardsInfo `json:"_shards,omitempty"`
}

+ 222 - 0
github.com/olivere/elastic/reindexer.go

@@ -0,0 +1,222 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"errors"
+)
+
// Reindexer simplifies the process of reindexing an index. You typically
// reindex a source index to a target index. However, you can also specify
// a query that filters out documents from the source index before bulk
// indexing them into the target index. The caller may also specify a
// different client for the target, e.g. when copying indices from one
// Elasticsearch cluster to another.
//
// Internally, the Reindexer uses a scan and scroll operation on the source
// index and bulk indexing to push data into the target index.
//
// The caller is responsible for setting up and/or clearing the target index
// before starting the reindex process.
//
// See http://www.elastic.co/guide/en/elasticsearch/guide/current/reindex.html
// for more information about reindexing.
type Reindexer struct {
	sourceClient, targetClient *Client
	sourceIndex, targetIndex   string
	query                      Query                 // optional filter on the source documents
	bulkSize                   int                   // documents per bulk request; defaults to 500 in Do
	scroll                     string                // scroll keep-alive; defaults to "5m" in Do
	progress                   ReindexerProgressFunc // optional progress callback
	statsOnly                  bool                  // when true, Do omits per-item error details
}
+
// ReindexerProgressFunc is a callback that can be used with Reindexer
// to report progress while reindexing data. It receives the number of
// documents processed so far and the total number of documents.
type ReindexerProgressFunc func(current, total int64)

// ReindexerResponse is returned from the Do func in a Reindexer.
// By default, it returns the number of succeeded and failed bulk operations.
// To return details about all failed items, set StatsOnly to false in
// Reindexer.
type ReindexerResponse struct {
	Success int64
	Failed  int64
	Errors  []*BulkResponseItem
}
+
+// NewReindexer returns a new Reindexer.
+func NewReindexer(client *Client, source, target string) *Reindexer {
+	return &Reindexer{
+		sourceClient: client,
+		sourceIndex:  source,
+		targetIndex:  target,
+		statsOnly:    true,
+	}
+}
+
// TargetClient specifies a different client for the target. This is
// necessary when the target index is in a different Elasticsearch cluster.
// By default, the source and target clients are the same.
func (ix *Reindexer) TargetClient(c *Client) *Reindexer {
	ix.targetClient = c
	return ix
}

// Query specifies the query to apply to the source. It filters out those
// documents to be indexed into target. A nil query does not filter out any
// documents.
func (ix *Reindexer) Query(q Query) *Reindexer {
	ix.query = q
	return ix
}

// BulkSize sets the number of documents to send to Elasticsearch per chunk.
// The default is 500.
func (ix *Reindexer) BulkSize(size int) *Reindexer {
	ix.bulkSize = size
	return ix
}

// Scroll specifies for how long the scroll operation on the source index
// should be maintained. The default is 5m.
func (ix *Reindexer) Scroll(timeout string) *Reindexer {
	ix.scroll = timeout
	return ix
}

// Progress indicates a callback that will be called while indexing.
func (ix *Reindexer) Progress(f ReindexerProgressFunc) *Reindexer {
	ix.progress = f
	return ix
}

// StatsOnly indicates whether the Do method should return details e.g. about
// the documents that failed while indexing. It is true by default, i.e. only
// the number of documents that succeeded/failed are returned. Set to false
// if you want all the details.
func (ix *Reindexer) StatsOnly(statsOnly bool) *Reindexer {
	ix.statsOnly = statsOnly
	return ix
}
+
+// Do starts the reindexing process.
+func (ix *Reindexer) Do() (*ReindexerResponse, error) {
+	if ix.sourceClient == nil {
+		return nil, errors.New("no source client")
+	}
+	if ix.sourceIndex == "" {
+		return nil, errors.New("no source index")
+	}
+	if ix.targetIndex == "" {
+		return nil, errors.New("no target index")
+	}
+	if ix.targetClient == nil {
+		ix.targetClient = ix.sourceClient
+	}
+	if ix.bulkSize <= 0 {
+		ix.bulkSize = 500
+	}
+	if ix.scroll == "" {
+		ix.scroll = "5m"
+	}
+
+	// Count total to report progress (if necessary)
+	var err error
+	var current, total int64
+	if ix.progress != nil {
+		total, err = ix.count()
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// Prepare scan and scroll to iterate through the source index
+	scanner := ix.sourceClient.Scan(ix.sourceIndex).Scroll(ix.scroll)
+	if ix.query != nil {
+		scanner = scanner.Query(ix.query)
+	}
+	cursor, err := scanner.Do()
+
+	bulk := ix.targetClient.Bulk().Index(ix.targetIndex)
+
+	ret := &ReindexerResponse{
+		Errors: make([]*BulkResponseItem, 0),
+	}
+
+	// Main loop iterates through the source index and bulk indexes into target.
+	for {
+		docs, err := cursor.Next()
+		if err == EOS {
+			break
+		}
+		if err != nil {
+			return ret, err
+		}
+
+		if docs.TotalHits() > 0 {
+			for _, hit := range docs.Hits.Hits {
+				if ix.progress != nil {
+					current++
+					ix.progress(current, total)
+				}
+
+				// TODO(oe) Do we need to deserialize here?
+				source := make(map[string]interface{})
+				if err := json.Unmarshal(*hit.Source, &source); err != nil {
+					return ret, err
+				}
+
+				// Enqueue and write into target index
+				req := NewBulkIndexRequest().Index(ix.targetIndex).Type(hit.Type).Id(hit.Id).Doc(source)
+				bulk.Add(req)
+				if bulk.NumberOfActions() >= ix.bulkSize {
+					bulk, err = ix.commit(bulk, ret)
+					if err != nil {
+						return ret, err
+					}
+				}
+			}
+		}
+	}
+
+	// Final flush
+	if bulk.NumberOfActions() > 0 {
+		bulk, err = ix.commit(bulk, ret)
+		if err != nil {
+			return ret, err
+		}
+		bulk = nil
+	}
+
+	return ret, nil
+}
+
+// count returns the number of documents in the source index.
+// The query is taken into account, if specified.
+func (ix *Reindexer) count() (int64, error) {
+	service := ix.sourceClient.Count(ix.sourceIndex)
+	if ix.query != nil {
+		service = service.Query(ix.query)
+	}
+	return service.Do()
+}
+
+// commit commits a bulk, updates the stats, and returns a fresh bulk service.
+func (ix *Reindexer) commit(bulk *BulkService, ret *ReindexerResponse) (*BulkService, error) {
+	bres, err := bulk.Do()
+	if err != nil {
+		return nil, err
+	}
+	ret.Success += int64(len(bres.Succeeded()))
+	failed := bres.Failed()
+	ret.Failed += int64(len(failed))
+	if !ix.statsOnly {
+		ret.Errors = append(ret.Errors, failed...)
+	}
+	bulk = ix.targetClient.Bulk().Index(ix.targetIndex)
+	return bulk, nil
+}

+ 59 - 0
github.com/olivere/elastic/request.go

@@ -0,0 +1,59 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"bytes"
+	"encoding/json"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"runtime"
+	"strings"
+)
+
// Request is an Elasticsearch-specific HTTP request. It is a named
// type over http.Request so that helpers for setting JSON and string
// bodies can be attached without wrapping the struct.
type Request http.Request

// NewRequest creates a new Request for the given HTTP method and URL.
// It pre-populates the User-Agent header (client version plus OS and
// architecture) and asks for JSON responses via the Accept header.
// The body is left empty; use SetBody, SetBodyString or SetBodyJson.
func NewRequest(method, url string) (*Request, error) {
	req, err := http.NewRequest(method, url, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Add("User-Agent", "elastic/"+Version+" ("+runtime.GOOS+"-"+runtime.GOARCH+")")
	req.Header.Add("Accept", "application/json")
	return (*Request)(req), nil
}
+
+func (r *Request) SetBodyJson(data interface{}) error {
+	body, err := json.Marshal(data)
+	if err != nil {
+		return err
+	}
+	r.SetBody(bytes.NewReader(body))
+	r.Header.Set("Content-Type", "application/json")
+	return nil
+}
+
+func (r *Request) SetBodyString(body string) error {
+	return r.SetBody(strings.NewReader(body))
+}
+
+func (r *Request) SetBody(body io.Reader) error {
+	rc, ok := body.(io.ReadCloser)
+	if !ok && body != nil {
+		rc = ioutil.NopCloser(body)
+	}
+	r.Body = rc
+	if body != nil {
+		switch v := body.(type) {
+		case *strings.Reader:
+			r.ContentLength = int64(v.Len())
+		case *bytes.Buffer:
+			r.ContentLength = int64(v.Len())
+		}
+	}
+	return nil
+}

+ 40 - 0
github.com/olivere/elastic/rescore.go

@@ -0,0 +1,40 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+type Rescore struct {
+	rescorer                 Rescorer
+	windowSize               *int
+	defaultRescoreWindowSize *int
+}
+
+func NewRescore() *Rescore {
+	return &Rescore{}
+}
+
+func (r *Rescore) WindowSize(windowSize int) *Rescore {
+	r.windowSize = &windowSize
+	return r
+}
+
+func (r *Rescore) IsEmpty() bool {
+	return r.rescorer == nil
+}
+
+func (r *Rescore) Rescorer(rescorer Rescorer) *Rescore {
+	r.rescorer = rescorer
+	return r
+}
+
+func (r *Rescore) Source() interface{} {
+	source := make(map[string]interface{})
+	if r.windowSize != nil {
+		source["window_size"] = *r.windowSize
+	} else if r.defaultRescoreWindowSize != nil {
+		source["window_size"] = *r.defaultRescoreWindowSize
+	}
+	source[r.rescorer.Name()] = r.rescorer.Source()
+	return source
+}

+ 59 - 0
github.com/olivere/elastic/rescorer.go

@@ -0,0 +1,59 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
// Rescorer is implemented by all rescorer variants (see
// QueryRescorer). Name returns the JSON key under which the rescorer
// is serialized and Source returns its JSON-serializable body.
type Rescorer interface {
	Name() string
	Source() interface{}
}
+
+// -- Query Rescorer --
+
+type QueryRescorer struct {
+	query              Query
+	rescoreQueryWeight *float64
+	queryWeight        *float64
+	scoreMode          string
+}
+
+func NewQueryRescorer(query Query) *QueryRescorer {
+	return &QueryRescorer{
+		query: query,
+	}
+}
+
+func (r *QueryRescorer) Name() string {
+	return "query"
+}
+
+func (r *QueryRescorer) RescoreQueryWeight(rescoreQueryWeight float64) *QueryRescorer {
+	r.rescoreQueryWeight = &rescoreQueryWeight
+	return r
+}
+
+func (r *QueryRescorer) QueryWeight(queryWeight float64) *QueryRescorer {
+	r.queryWeight = &queryWeight
+	return r
+}
+
+func (r *QueryRescorer) ScoreMode(scoreMode string) *QueryRescorer {
+	r.scoreMode = scoreMode
+	return r
+}
+
+func (r *QueryRescorer) Source() interface{} {
+	source := make(map[string]interface{})
+	source["rescore_query"] = r.query.Source()
+	if r.queryWeight != nil {
+		source["query_weight"] = *r.queryWeight
+	}
+	if r.rescoreQueryWeight != nil {
+		source["rescore_query_weight"] = *r.rescoreQueryWeight
+	}
+	if r.scoreMode != "" {
+		source["score_mode"] = r.scoreMode
+	}
+	return source
+}

+ 43 - 0
github.com/olivere/elastic/response.go

@@ -0,0 +1,43 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"io/ioutil"
+	"net/http"
+)
+
// Response represents a response from Elasticsearch.
type Response struct {
	// StatusCode is the HTTP status code, e.g. 200.
	StatusCode int
	// Header is the HTTP header from the HTTP response.
	// Keys in the map are canonicalized (see http.CanonicalHeaderKey).
	Header http.Header
	// Body is the deserialized response body, kept as raw JSON so
	// callers can unmarshal it into their own result types. It stays
	// nil when the response carried no content.
	Body json.RawMessage
}
+
// newResponse creates a new Response from the HTTP response. It reads
// res.Body fully and runs the bytes through the client's decoder into
// r.Body. Closing res.Body is left to the caller.
func (c *Client) newResponse(res *http.Response) (*Response, error) {
	r := &Response{
		StatusCode: res.StatusCode,
		Header:     res.Header,
	}
	if res.Body != nil {
		slurp, err := ioutil.ReadAll(res.Body)
		if err != nil {
			return nil, err
		}
		// HEAD requests return a body but no content; leave r.Body
		// nil in that case instead of decoding an empty payload.
		if len(slurp) > 0 {
			if err := c.decoder.Decode(slurp, &r.Body); err != nil {
				return nil, err
			}
		}
	}
	return r, nil
}

+ 273 - 0
github.com/olivere/elastic/scan.go

@@ -0,0 +1,273 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/url"
+	"strings"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
const (
	// defaultKeepAlive is the scroll keep-alive used whenever the
	// caller did not specify one explicitly.
	defaultKeepAlive = "5m"
)

var (
	// EOS indicates the end of stream (or scan): no more documents.
	EOS = errors.New("EOS")

	// ErrNoScrollId is returned when a scroll id is missing.
	ErrNoScrollId = errors.New("no scrollId")
)

// ScanService manages a cursor through documents in Elasticsearch.
type ScanService struct {
	client    *Client
	indices   []string // indices to scan; unrestricted if empty
	types     []string // types to scan; unrestricted if empty
	keepAlive string   // scroll keep-alive; defaultKeepAlive if empty
	query     Query
	size      *int
	pretty    bool
}

// NewScanService creates a new ScanService, defaulting to a
// match-all query. Callers typically obtain it via the client.
func NewScanService(client *Client) *ScanService {
	builder := &ScanService{
		client: client,
		query:  NewMatchAllQuery(),
	}
	return builder
}
+
+func (s *ScanService) Index(index string) *ScanService {
+	if s.indices == nil {
+		s.indices = make([]string, 0)
+	}
+	s.indices = append(s.indices, index)
+	return s
+}
+
+func (s *ScanService) Indices(indices ...string) *ScanService {
+	if s.indices == nil {
+		s.indices = make([]string, 0)
+	}
+	s.indices = append(s.indices, indices...)
+	return s
+}
+
+func (s *ScanService) Type(typ string) *ScanService {
+	if s.types == nil {
+		s.types = make([]string, 0)
+	}
+	s.types = append(s.types, typ)
+	return s
+}
+
+func (s *ScanService) Types(types ...string) *ScanService {
+	if s.types == nil {
+		s.types = make([]string, 0)
+	}
+	s.types = append(s.types, types...)
+	return s
+}
+
+// Scroll is an alias for KeepAlive, the time to keep
+// the cursor alive (e.g. "5m" for 5 minutes).
+func (s *ScanService) Scroll(keepAlive string) *ScanService {
+	s.keepAlive = keepAlive
+	return s
+}
+
+// KeepAlive sets the maximum time the cursor will be
+// available before expiration (e.g. "5m" for 5 minutes).
+func (s *ScanService) KeepAlive(keepAlive string) *ScanService {
+	s.keepAlive = keepAlive
+	return s
+}
+
+func (s *ScanService) Query(query Query) *ScanService {
+	s.query = query
+	return s
+}
+
+func (s *ScanService) Pretty(pretty bool) *ScanService {
+	s.pretty = pretty
+	return s
+}
+
+func (s *ScanService) Size(size int) *ScanService {
+	s.size = &size
+	return s
+}
+
+func (s *ScanService) Do() (*ScanCursor, error) {
+	// Build url
+	path := "/"
+
+	// Indices part
+	indexPart := make([]string, 0)
+	for _, index := range s.indices {
+		index, err := uritemplates.Expand("{index}", map[string]string{
+			"index": index,
+		})
+		if err != nil {
+			return nil, err
+		}
+		indexPart = append(indexPart, index)
+	}
+	if len(indexPart) > 0 {
+		path += strings.Join(indexPart, ",")
+	}
+
+	// Types
+	typesPart := make([]string, 0)
+	for _, typ := range s.types {
+		typ, err := uritemplates.Expand("{type}", map[string]string{
+			"type": typ,
+		})
+		if err != nil {
+			return nil, err
+		}
+		typesPart = append(typesPart, typ)
+	}
+	if len(typesPart) > 0 {
+		path += "/" + strings.Join(typesPart, ",")
+	}
+
+	// Search
+	path += "/_search"
+
+	// Parameters
+	params := make(url.Values)
+	params.Set("search_type", "scan")
+	if s.pretty {
+		params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+	}
+	if s.keepAlive != "" {
+		params.Set("scroll", s.keepAlive)
+	} else {
+		params.Set("scroll", defaultKeepAlive)
+	}
+	if s.size != nil && *s.size > 0 {
+		params.Set("size", fmt.Sprintf("%d", *s.size))
+	}
+
+	// Set body
+	body := make(map[string]interface{})
+	if s.query != nil {
+		body["query"] = s.query.Source()
+	}
+
+	// Get response
+	res, err := s.client.PerformRequest("POST", path, params, body)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return result
+	searchResult := new(SearchResult)
+	if err := json.Unmarshal(res.Body, searchResult); err != nil {
+		return nil, err
+	}
+
+	cursor := NewScanCursor(s.client, s.keepAlive, s.pretty, searchResult)
+
+	return cursor, nil
+}
+
// ScanCursor represents a single page of results from
// an Elasticsearch Scan operation. Results always holds the most
// recently fetched page (it is overwritten by Next).
type ScanCursor struct {
	Results *SearchResult

	client      *Client
	keepAlive   string
	pretty      bool
	currentPage int // number of pages fetched via Next so far
}

// NewScanCursor returns a new initialized instance
// of ScanCursor.
func NewScanCursor(client *Client, keepAlive string, pretty bool, searchResult *SearchResult) *ScanCursor {
	return &ScanCursor{
		client:    client,
		keepAlive: keepAlive,
		pretty:    pretty,
		Results:   searchResult,
	}
}
+
+// TotalHits is a convenience method that returns the number
+// of hits the cursor will iterate through.
+func (c *ScanCursor) TotalHits() int64 {
+	if c.Results.Hits == nil {
+		return 0
+	}
+	return c.Results.Hits.TotalHits
+}
+
// Next returns the next search result or nil when all
// documents have been scanned.
//
// Note that Next unmarshals each page into c.Results in place, so a
// previously returned *SearchResult is overwritten by the next call.
//
// Usage:
//
//   for {
//     res, err := cursor.Next()
//     if err == elastic.EOS {
//       // End of stream (or scan)
//       break
//     }
//     if err != nil {
//       // Handle error
//     }
//     // Work with res
//   }
//
func (c *ScanCursor) Next() (*SearchResult, error) {
	// After the first page, an empty page marks the end of the scan.
	// The check is skipped on page 0 — presumably because the initial
	// scan request returns a scroll id but no hits; TODO(review):
	// confirm against the scan API behavior.
	if c.currentPage > 0 {
		if c.Results.Hits == nil || len(c.Results.Hits.Hits) == 0 || c.Results.Hits.TotalHits == 0 {
			return nil, EOS
		}
	}
	// Without a scroll id there is nothing left to fetch.
	if c.Results.ScrollId == "" {
		return nil, EOS
	}

	// Build url
	path := "/_search/scroll"

	// Parameters
	params := make(url.Values)
	if c.pretty {
		params.Set("pretty", fmt.Sprintf("%v", c.pretty))
	}
	if c.keepAlive != "" {
		params.Set("scroll", c.keepAlive)
	} else {
		params.Set("scroll", defaultKeepAlive)
	}

	// The scroll id is sent as the raw request body.
	body := c.Results.ScrollId

	// Get response
	res, err := c.client.PerformRequest("POST", path, params, body)
	if err != nil {
		return nil, err
	}

	// Decode directly into c.Results, replacing the previous page.
	if err := json.Unmarshal(res.Body, c.Results); err != nil {
		return nil, err
	}

	c.currentPage += 1

	return c.Results, nil
}

+ 219 - 0
github.com/olivere/elastic/scroll.go

@@ -0,0 +1,219 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+	"strings"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
// ScrollService manages a cursor through documents in Elasticsearch.
// Unlike ScanService it exposes the scroll id directly: calling Do
// without a scroll id fetches the first page, and with one fetches
// the next page.
type ScrollService struct {
	client    *Client
	indices   []string // indices to search; unrestricted if empty
	types     []string // types to search; unrestricted if empty
	keepAlive string   // scroll keep-alive; defaultKeepAlive if empty
	query     Query
	size      *int
	pretty    bool
	scrollId  string // id of the scroll to continue; empty for page one
}

// NewScrollService creates a new ScrollService, defaulting to a
// match-all query. Callers typically obtain it via the client.
func NewScrollService(client *Client) *ScrollService {
	builder := &ScrollService{
		client: client,
		query:  NewMatchAllQuery(),
	}
	return builder
}
+
+func (s *ScrollService) Index(index string) *ScrollService {
+	if s.indices == nil {
+		s.indices = make([]string, 0)
+	}
+	s.indices = append(s.indices, index)
+	return s
+}
+
+func (s *ScrollService) Indices(indices ...string) *ScrollService {
+	if s.indices == nil {
+		s.indices = make([]string, 0)
+	}
+	s.indices = append(s.indices, indices...)
+	return s
+}
+
+func (s *ScrollService) Type(typ string) *ScrollService {
+	if s.types == nil {
+		s.types = make([]string, 0)
+	}
+	s.types = append(s.types, typ)
+	return s
+}
+
+func (s *ScrollService) Types(types ...string) *ScrollService {
+	if s.types == nil {
+		s.types = make([]string, 0)
+	}
+	s.types = append(s.types, types...)
+	return s
+}
+
+// Scroll is an alias for KeepAlive, the time to keep
+// the cursor alive (e.g. "5m" for 5 minutes).
+func (s *ScrollService) Scroll(keepAlive string) *ScrollService {
+	s.keepAlive = keepAlive
+	return s
+}
+
+// KeepAlive sets the maximum time the cursor will be
+// available before expiration (e.g. "5m" for 5 minutes).
+func (s *ScrollService) KeepAlive(keepAlive string) *ScrollService {
+	s.keepAlive = keepAlive
+	return s
+}
+
+func (s *ScrollService) Query(query Query) *ScrollService {
+	s.query = query
+	return s
+}
+
+func (s *ScrollService) Pretty(pretty bool) *ScrollService {
+	s.pretty = pretty
+	return s
+}
+
+func (s *ScrollService) Size(size int) *ScrollService {
+	s.size = &size
+	return s
+}
+
+func (s *ScrollService) ScrollId(scrollId string) *ScrollService {
+	s.scrollId = scrollId
+	return s
+}
+
+func (s *ScrollService) Do() (*SearchResult, error) {
+	if s.scrollId == "" {
+		return s.GetFirstPage()
+	}
+	return s.GetNextPage()
+}
+
+func (s *ScrollService) GetFirstPage() (*SearchResult, error) {
+	// Build url
+	path := "/"
+
+	// Indices part
+	indexPart := make([]string, 0)
+	for _, index := range s.indices {
+		index, err := uritemplates.Expand("{index}", map[string]string{
+			"index": index,
+		})
+		if err != nil {
+			return nil, err
+		}
+		indexPart = append(indexPart, index)
+	}
+	if len(indexPart) > 0 {
+		path += strings.Join(indexPart, ",")
+	}
+
+	// Types
+	typesPart := make([]string, 0)
+	for _, typ := range s.types {
+		typ, err := uritemplates.Expand("{type}", map[string]string{
+			"type": typ,
+		})
+		if err != nil {
+			return nil, err
+		}
+		typesPart = append(typesPart, typ)
+	}
+	if len(typesPart) > 0 {
+		path += "/" + strings.Join(typesPart, ",")
+	}
+
+	// Search
+	path += "/_search"
+
+	// Parameters
+	params := make(url.Values)
+	params.Set("search_type", "scan")
+	if s.pretty {
+		params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+	}
+	if s.keepAlive != "" {
+		params.Set("scroll", s.keepAlive)
+	} else {
+		params.Set("scroll", defaultKeepAlive)
+	}
+	if s.size != nil && *s.size > 0 {
+		params.Set("size", fmt.Sprintf("%d", *s.size))
+	}
+
+	// Set body
+	body := make(map[string]interface{})
+	if s.query != nil {
+		body["query"] = s.query.Source()
+	}
+
+	// Get response
+	res, err := s.client.PerformRequest("POST", path, params, body)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return result
+	searchResult := new(SearchResult)
+	if err := json.Unmarshal(res.Body, searchResult); err != nil {
+		return nil, err
+	}
+
+	return searchResult, nil
+}
+
+func (s *ScrollService) GetNextPage() (*SearchResult, error) {
+	if s.scrollId == "" {
+		return nil, EOS
+	}
+
+	// Build url
+	path := "/_search/scroll"
+
+	// Parameters
+	params := make(url.Values)
+	if s.pretty {
+		params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+	}
+	if s.keepAlive != "" {
+		params.Set("scroll", s.keepAlive)
+	} else {
+		params.Set("scroll", defaultKeepAlive)
+	}
+
+	// Get response
+	res, err := s.client.PerformRequest("POST", path, params, s.scrollId)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return result
+	searchResult := new(SearchResult)
+	if err := json.Unmarshal(res.Body, searchResult); err != nil {
+		return nil, err
+	}
+
+	// Determine last page
+	if searchResult == nil || searchResult.Hits == nil || len(searchResult.Hits.Hits) == 0 || searchResult.Hits.TotalHits == 0 {
+		return nil, EOS
+	}
+
+	return searchResult, nil
+}

+ 507 - 0
github.com/olivere/elastic/search.go

@@ -0,0 +1,507 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+	"reflect"
+	"strings"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
// SearchService searches for documents in Elasticsearch. Most setters
// delegate to an embedded SearchSource that builds the request body;
// Source may be used to provide the body manually instead.
type SearchService struct {
	client       *Client
	searchSource *SearchSource // builds the request body unless source is set
	source       interface{}   // manual request body; takes precedence in Do
	pretty       bool
	searchType   string
	indices      []string
	queryHint    string
	routing      string
	preference   string
	types        []string
}
+
+// NewSearchService creates a new service for searching in Elasticsearch.
+// You typically do not create the service yourself manually, but access
+// it via client.Search().
+func NewSearchService(client *Client) *SearchService {
+	builder := &SearchService{
+		client:       client,
+		searchSource: NewSearchSource(),
+	}
+	return builder
+}
+
+// SearchSource sets the search source builder to use with this service.
+func (s *SearchService) SearchSource(searchSource *SearchSource) *SearchService {
+	s.searchSource = searchSource
+	if s.searchSource == nil {
+		s.searchSource = NewSearchSource()
+	}
+	return s
+}
+
+// Source allows the user to set the request body manually without using
+// any of the structs and interfaces in Elastic.
+func (s *SearchService) Source(source interface{}) *SearchService {
+	s.source = source
+	return s
+}
+
+// Index sets the name of the index to use for search.
+func (s *SearchService) Index(index string) *SearchService {
+	if s.indices == nil {
+		s.indices = make([]string, 0)
+	}
+	s.indices = append(s.indices, index)
+	return s
+}
+
+// Indices sets the names of the indices to use for search.
+func (s *SearchService) Indices(indices ...string) *SearchService {
+	if s.indices == nil {
+		s.indices = make([]string, 0)
+	}
+	s.indices = append(s.indices, indices...)
+	return s
+}
+
+// Type restricts the search for the given type.
+func (s *SearchService) Type(typ string) *SearchService {
+	if s.types == nil {
+		s.types = []string{typ}
+	} else {
+		s.types = append(s.types, typ)
+	}
+	return s
+}
+
+// Types allows to restrict the search to a list of types.
+func (s *SearchService) Types(types ...string) *SearchService {
+	if s.types == nil {
+		s.types = make([]string, len(types))
+	}
+	s.types = append(s.types, types...)
+	return s
+}
+
// Pretty enables the caller to indent the JSON output.
func (s *SearchService) Pretty(pretty bool) *SearchService {
	s.pretty = pretty
	return s
}

// Timeout sets the timeout to use, e.g. "1s" or "1000ms".
func (s *SearchService) Timeout(timeout string) *SearchService {
	s.searchSource = s.searchSource.Timeout(timeout)
	return s
}

// TimeoutInMillis sets the timeout in milliseconds.
func (s *SearchService) TimeoutInMillis(timeoutInMillis int) *SearchService {
	s.searchSource = s.searchSource.TimeoutInMillis(timeoutInMillis)
	return s
}

// SearchType sets the search operation type. Valid values are:
// "query_then_fetch", "query_and_fetch", "dfs_query_then_fetch",
// "dfs_query_and_fetch", "count", "scan".
// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-search-type.html#search-request-search-type
// for details.
func (s *SearchService) SearchType(searchType string) *SearchService {
	s.searchType = searchType
	return s
}

// Routing allows for (a comma-separated) list of specific routing values.
func (s *SearchService) Routing(routing string) *SearchService {
	s.routing = routing
	return s
}

// Preference specifies the node or shard the operation should be
// performed on (default: "random").
func (s *SearchService) Preference(preference string) *SearchService {
	s.preference = preference
	return s
}

// QueryHint sets the query hint to send with the search.
func (s *SearchService) QueryHint(queryHint string) *SearchService {
	s.queryHint = queryHint
	return s
}

// Query sets the query to perform, e.g. MatchAllQuery.
func (s *SearchService) Query(query Query) *SearchService {
	s.searchSource = s.searchSource.Query(query)
	return s
}

// PostFilter is executed as the last filter. It only affects the
// search hits but not facets. See
// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-post-filter.html
// for details.
func (s *SearchService) PostFilter(postFilter Filter) *SearchService {
	s.searchSource = s.searchSource.PostFilter(postFilter)
	return s
}

// Highlight sets the highlighting. See
// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-highlighting.html
// for details.
func (s *SearchService) Highlight(highlight *Highlight) *SearchService {
	s.searchSource = s.searchSource.Highlight(highlight)
	return s
}

// GlobalSuggestText sets the global text for suggesters. See
// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters.html#global-suggest
// for details.
func (s *SearchService) GlobalSuggestText(globalText string) *SearchService {
	s.searchSource = s.searchSource.GlobalSuggestText(globalText)
	return s
}

// Suggester sets the suggester. See
// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters.html
// for details.
func (s *SearchService) Suggester(suggester Suggester) *SearchService {
	s.searchSource = s.searchSource.Suggester(suggester)
	return s
}

// Facet adds a facet to the search. See
// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-facets.html
// to get an overview of Elasticsearch facets.
func (s *SearchService) Facet(name string, facet Facet) *SearchService {
	s.searchSource = s.searchSource.Facet(name, facet)
	return s
}

// Aggregation adds an aggregation to the search. See
// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations.html
// for an overview of aggregations in Elasticsearch.
func (s *SearchService) Aggregation(name string, aggregation Aggregation) *SearchService {
	s.searchSource = s.searchSource.Aggregation(name, aggregation)
	return s
}

// MinScore excludes documents which have a score less than the minimum
// specified here. See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-min-score.html.
func (s *SearchService) MinScore(minScore float64) *SearchService {
	s.searchSource = s.searchSource.MinScore(minScore)
	return s
}

// From defines the offset from the first result you want to fetch.
// Use it in combination with Size to paginate through results.
// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-from-size.html
// for details.
func (s *SearchService) From(from int) *SearchService {
	s.searchSource = s.searchSource.From(from)
	return s
}

// Size defines the maximum number of hits to be returned.
// Use it in combination with From to paginate through results.
// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-from-size.html
// for details.
func (s *SearchService) Size(size int) *SearchService {
	s.searchSource = s.searchSource.Size(size)
	return s
}

// Explain can be enabled to provide an explanation for each hit and how its
// score was computed.
// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-explain.html
// for details.
func (s *SearchService) Explain(explain bool) *SearchService {
	s.searchSource = s.searchSource.Explain(explain)
	return s
}

// Version can be set to true to return a version for each search hit.
// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-version.html.
func (s *SearchService) Version(version bool) *SearchService {
	s.searchSource = s.searchSource.Version(version)
	return s
}

// Sort the results by the given field, in the given order.
// Use the alternative SortWithInfo to use a struct to define the sorting.
// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-sort.html
// for detailed documentation of sorting.
func (s *SearchService) Sort(field string, ascending bool) *SearchService {
	s.searchSource = s.searchSource.Sort(field, ascending)
	return s
}

// SortWithInfo defines how to sort results.
// Use the Sort func for a shortcut.
// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-sort.html
// for detailed documentation of sorting.
func (s *SearchService) SortWithInfo(info SortInfo) *SearchService {
	s.searchSource = s.searchSource.SortWithInfo(info)
	return s
}

// SortBy defines how to sort results.
// Use the Sort func for a shortcut.
// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-sort.html
// for detailed documentation of sorting.
func (s *SearchService) SortBy(sorter ...Sorter) *SearchService {
	s.searchSource = s.searchSource.SortBy(sorter...)
	return s
}

// Fields tells Elasticsearch to only load specific fields from a search hit.
// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-fields.html.
func (s *SearchService) Fields(fields ...string) *SearchService {
	s.searchSource = s.searchSource.Fields(fields...)
	return s
}
+
+// Do executes the search and returns a SearchResult.
+func (s *SearchService) Do() (*SearchResult, error) {
+	// Build url
+	path := "/"
+
+	// Indices part
+	indexPart := make([]string, 0)
+	for _, index := range s.indices {
+		index, err := uritemplates.Expand("{index}", map[string]string{
+			"index": index,
+		})
+		if err != nil {
+			return nil, err
+		}
+		indexPart = append(indexPart, index)
+	}
+	path += strings.Join(indexPart, ",")
+
+	// Types part
+	if len(s.types) > 0 {
+		typesPart := make([]string, 0)
+		for _, typ := range s.types {
+			typ, err := uritemplates.Expand("{type}", map[string]string{
+				"type": typ,
+			})
+			if err != nil {
+				return nil, err
+			}
+			typesPart = append(typesPart, typ)
+		}
+		path += "/"
+		path += strings.Join(typesPart, ",")
+	}
+
+	// Search
+	path += "/_search"
+
+	// Parameters
+	params := make(url.Values)
+	if s.pretty {
+		params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+	}
+	if s.searchType != "" {
+		params.Set("search_type", s.searchType)
+	}
+
+	// Perform request
+	var body interface{}
+	if s.source != nil {
+		body = s.source
+	} else {
+		body = s.searchSource.Source()
+	}
+	res, err := s.client.PerformRequest("POST", path, params, body)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return search results
+	ret := new(SearchResult)
+	if err := json.Unmarshal(res.Body, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
// SearchResult is the result of a search in Elasticsearch.
// Use TotalHits and Each for nil-safe access to the hits.
type SearchResult struct {
	TookInMillis int64         `json:"took"`            // search time in milliseconds
	ScrollId     string        `json:"_scroll_id"`      // only used with Scroll and Scan operations
	Hits         *SearchHits   `json:"hits"`            // the actual search hits
	Suggest      SearchSuggest `json:"suggest"`         // results from suggesters
	Facets       SearchFacets  `json:"facets"`          // results from facets
	Aggregations Aggregations  `json:"aggregations"`    // results from aggregations
	TimedOut     bool          `json:"timed_out"`       // true if the search timed out
	Error        string        `json:"error,omitempty"` // used in MultiSearch only
}
+
+// TotalHits is a convenience function to return the number of hits for
+// a search result.
+func (r *SearchResult) TotalHits() int64 {
+	if r.Hits != nil {
+		return r.Hits.TotalHits
+	}
+	return 0
+}
+
+// Each is a utility function to iterate over all hits. It saves you from
+// checking for nil values. Notice that Each will ignore errors in
+// serializing JSON.
+func (r *SearchResult) Each(typ reflect.Type) []interface{} {
+	if r.Hits == nil || r.Hits.Hits == nil || len(r.Hits.Hits) == 0 {
+		return nil
+	}
+	slice := make([]interface{}, 0)
+	for _, hit := range r.Hits.Hits {
+		v := reflect.New(typ).Elem()
+		if err := json.Unmarshal(*hit.Source, v.Addr().Interface()); err == nil {
+			slice = append(slice, v.Interface())
+		}
+	}
+	return slice
+}
+
// SearchHits specifies the list of search hits.
type SearchHits struct {
	TotalHits int64        `json:"total"`     // total number of hits found
	MaxScore  *float64     `json:"max_score"` // maximum score of all hits
	Hits      []*SearchHit `json:"hits"`      // the actual hits returned
}

// SearchHit is a single hit.
type SearchHit struct {
	Score       *float64               `json:"_score"`       // computed score
	Index       string                 `json:"_index"`       // index name
	Id          string                 `json:"_id"`          // external or internal
	Type        string                 `json:"_type"`        // type
	Version     *int64                 `json:"_version"`     // version number, when Version is set to true in SearchService
	Sort        []interface{}          `json:"sort"`         // sort information
	Highlight   SearchHitHighlight     `json:"highlight"`    // highlighter information
	Source      *json.RawMessage       `json:"_source"`      // stored document source; nil when the response carries no _source
	Fields      map[string]interface{} `json:"fields"`       // returned fields
	Explanation *SearchExplanation     `json:"_explanation"` // explains how the score was computed

	// Shard
	// HighlightFields
	// SortValues
	// MatchedFilters
}

// SearchExplanation explains how the score for a hit was computed.
// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-explain.html.
type SearchExplanation struct {
	Value       float64             `json:"value"`             // e.g. 1.0
	Description string              `json:"description"`       // e.g. "boost" or "ConstantScore(*:*), product of:"
	Details     []SearchExplanation `json:"details,omitempty"` // recursive details
}

// Suggest

// SearchSuggest is a map of suggestions, keyed by suggester name.
// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters.html.
type SearchSuggest map[string][]SearchSuggestion

// SearchSuggestion is a single search suggestion.
// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters.html.
type SearchSuggestion struct {
	Text    string                   `json:"text"`
	Offset  int                      `json:"offset"`
	Length  int                      `json:"length"`
	Options []SearchSuggestionOption `json:"options"`
}

// SearchSuggestionOption is an option of a SearchSuggestion.
// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-suggesters.html.
type SearchSuggestionOption struct {
	Text    string      `json:"text"`
	Score   float32     `json:"score"`
	Freq    int         `json:"freq"`
	Payload interface{} `json:"payload"`
}

// Facets

// SearchFacets is a map of facets, keyed by facet name.
// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-facets.html.
type SearchFacets map[string]*SearchFacet

// SearchFacet is a single facet. Only the slice matching the facet's
// Type is populated; the entry types below are unexported because
// they are only produced by deserialization, never constructed by
// callers.
// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-facets.html.
type SearchFacet struct {
	Type    string             `json:"_type"`
	Missing int                `json:"missing"`
	Total   int                `json:"total"`
	Other   int                `json:"other"`
	Terms   []searchFacetTerm  `json:"terms"`
	Ranges  []searchFacetRange `json:"ranges"`
	Entries []searchFacetEntry `json:"entries"`
}

// searchFacetTerm is the result of a terms facet.
// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-facets-terms-facet.html.
type searchFacetTerm struct {
	Term  string `json:"term"`
	Count int    `json:"count"`
}

// searchFacetRange is the result of a range facet.
// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-facets-range-facet.html.
type searchFacetRange struct {
	From       *float64 `json:"from"`
	FromStr    *string  `json:"from_str"`
	To         *float64 `json:"to"`
	ToStr      *string  `json:"to_str"`
	Count      int      `json:"count"`
	Min        *float64 `json:"min"`
	Max        *float64 `json:"max"`
	TotalCount int      `json:"total_count"`
	Total      *float64 `json:"total"`
	Mean       *float64 `json:"mean"`
}

// searchFacetEntry is a general facet entry.
// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-facets.html
type searchFacetEntry struct {
	// Key for this facet, e.g. in histograms
	Key interface{} `json:"key"`
	// Date histograms contain the number of milliseconds as date:
	// If e.Time = 1293840000000, then: Time.at(1293840000000/1000) => 2011-01-01
	Time int64 `json:"time"`
	// Number of hits for this facet
	Count int `json:"count"`
	// Min is either a string like "Infinity" or a float64.
	// This is returned with some DateHistogram facets.
	Min interface{} `json:"min,omitempty"`
	// Max is either a string like "-Infinity" or a float64
	// This is returned with some DateHistogram facets.
	Max interface{} `json:"max,omitempty"`
	// Total is the sum of all entries on the recorded Time
	// This is returned with some DateHistogram facets.
	Total float64 `json:"total,omitempty"`
	// TotalCount is the number of entries for Total
	// This is returned with some DateHistogram facets.
	TotalCount int `json:"total_count,omitempty"`
	// Mean is the mean value
	// This is returned with some DateHistogram facets.
	Mean float64 `json:"mean,omitempty"`
}

// Aggregations (see search_aggs.go)

// Highlighting

// SearchHitHighlight is the highlight information of a search hit,
// keyed by field name.
// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-highlighting.html
// for a general discussion of highlighting.
type SearchHitHighlight map[string][]string

+ 916 - 0
github.com/olivere/elastic/search_aggs.go

@@ -0,0 +1,916 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"bytes"
+	"encoding/json"
+)
+
// Aggregation can be seen as a unit-of-work that builds
// analytic information over a set of documents. It is
// (in many senses) the follow-up of facets in Elasticsearch.
// Implementations return the JSON-serializable request body
// fragment for one aggregation via Source.
// For more details about aggregations, visit:
// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations.html
type Aggregation interface {
	Source() interface{}
}

// Aggregations is a list of aggregations that are part of a search result.
// It maps the aggregation name to its still-undecoded JSON payload; the
// typed accessor methods (Min, Terms, …) decode a payload on demand.
type Aggregations map[string]*json.RawMessage
+
+// Min returns min aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-min-aggregation.html
+func (a Aggregations) Min(name string) (*AggregationValueMetric, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationValueMetric)
+		if raw == nil {
+			return agg, true
+		}
+		if err := json.Unmarshal(*raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// Max returns max aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-max-aggregation.html
+func (a Aggregations) Max(name string) (*AggregationValueMetric, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationValueMetric)
+		if raw == nil {
+			return agg, true
+		}
+		if err := json.Unmarshal(*raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// Sum returns sum aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-sum-aggregation.html
+func (a Aggregations) Sum(name string) (*AggregationValueMetric, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationValueMetric)
+		if raw == nil {
+			return agg, true
+		}
+		if err := json.Unmarshal(*raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// Avg returns average aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-avg-aggregation.html
+func (a Aggregations) Avg(name string) (*AggregationValueMetric, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationValueMetric)
+		if raw == nil {
+			return agg, true
+		}
+		if err := json.Unmarshal(*raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// ValueCount returns value-count aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-valuecount-aggregation.html
+func (a Aggregations) ValueCount(name string) (*AggregationValueMetric, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationValueMetric)
+		if raw == nil {
+			return agg, true
+		}
+		if err := json.Unmarshal(*raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// Cardinality returns cardinality aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-cardinality-aggregation.html
+func (a Aggregations) Cardinality(name string) (*AggregationValueMetric, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationValueMetric)
+		if raw == nil {
+			return agg, true
+		}
+		if err := json.Unmarshal(*raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// Stats returns stats aggregation results.
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-stats-aggregation.html
+func (a Aggregations) Stats(name string) (*AggregationStatsMetric, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationStatsMetric)
+		if raw == nil {
+			return agg, true
+		}
+		if err := json.Unmarshal(*raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// ExtendedStats returns extended stats aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-extendedstats-aggregation.html
+func (a Aggregations) ExtendedStats(name string) (*AggregationExtendedStatsMetric, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationExtendedStatsMetric)
+		if raw == nil {
+			return agg, true
+		}
+		if err := json.Unmarshal(*raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// Percentiles returns percentiles results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-percentile-aggregation.html
+func (a Aggregations) Percentiles(name string) (*AggregationPercentilesMetric, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationPercentilesMetric)
+		if raw == nil {
+			return agg, true
+		}
+		if err := json.Unmarshal(*raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// PercentileRanks returns percentile ranks results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-percentile-rank-aggregation.html
+func (a Aggregations) PercentileRanks(name string) (*AggregationPercentilesMetric, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationPercentilesMetric)
+		if raw == nil {
+			return agg, true
+		}
+		if err := json.Unmarshal(*raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// TopHits returns top-hits aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-top-hits-aggregation.html
+func (a Aggregations) TopHits(name string) (*AggregationTopHitsMetric, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationTopHitsMetric)
+		if raw == nil {
+			return agg, true
+		}
+		if err := json.Unmarshal(*raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// Global returns global results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-global-aggregation.html
+func (a Aggregations) Global(name string) (*AggregationSingleBucket, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationSingleBucket)
+		if raw == nil {
+			return agg, true
+		}
+		if err := json.Unmarshal(*raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// Filter returns filter results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-filter-aggregation.html
+func (a Aggregations) Filter(name string) (*AggregationSingleBucket, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationSingleBucket)
+		if raw == nil {
+			return agg, true
+		}
+		if err := json.Unmarshal(*raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// Filters returns filters results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-filters-aggregation.html
+func (a Aggregations) Filters(name string) (*AggregationBucketFilters, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationBucketFilters)
+		if raw == nil {
+			return agg, true
+		}
+		if err := json.Unmarshal(*raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// Missing returns missing results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-missing-aggregation.html
+func (a Aggregations) Missing(name string) (*AggregationSingleBucket, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationSingleBucket)
+		if raw == nil {
+			return agg, true
+		}
+		if err := json.Unmarshal(*raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// Nested returns nested results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-nested-aggregation.html
+func (a Aggregations) Nested(name string) (*AggregationSingleBucket, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationSingleBucket)
+		if raw == nil {
+			return agg, true
+		}
+		if err := json.Unmarshal(*raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// ReverseNested returns reverse-nested results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-reverse-nested-aggregation.html
+func (a Aggregations) ReverseNested(name string) (*AggregationSingleBucket, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationSingleBucket)
+		if raw == nil {
+			return agg, true
+		}
+		if err := json.Unmarshal(*raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// Children returns children results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-children-aggregation.html
+func (a Aggregations) Children(name string) (*AggregationSingleBucket, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationSingleBucket)
+		if raw == nil {
+			return agg, true
+		}
+		if err := json.Unmarshal(*raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// Terms returns terms aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-terms-aggregation.html
+func (a Aggregations) Terms(name string) (*AggregationBucketKeyItems, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationBucketKeyItems)
+		if raw == nil {
+			return agg, true
+		}
+		if err := json.Unmarshal(*raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// SignificantTerms returns significant terms aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-significantterms-aggregation.html
+func (a Aggregations) SignificantTerms(name string) (*AggregationBucketSignificantTerms, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationBucketSignificantTerms)
+		if raw == nil {
+			return agg, true
+		}
+		if err := json.Unmarshal(*raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// Range returns range aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-range-aggregation.html
+func (a Aggregations) Range(name string) (*AggregationBucketRangeItems, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationBucketRangeItems)
+		if raw == nil {
+			return agg, true
+		}
+		if err := json.Unmarshal(*raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// DateRange returns date range aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-daterange-aggregation.html
+func (a Aggregations) DateRange(name string) (*AggregationBucketRangeItems, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationBucketRangeItems)
+		if raw == nil {
+			return agg, true
+		}
+		if err := json.Unmarshal(*raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// IPv4Range returns IPv4 range aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-iprange-aggregation.html
+func (a Aggregations) IPv4Range(name string) (*AggregationBucketRangeItems, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationBucketRangeItems)
+		if raw == nil {
+			return agg, true
+		}
+		if err := json.Unmarshal(*raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// Histogram returns histogram aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-histogram-aggregation.html
+func (a Aggregations) Histogram(name string) (*AggregationBucketHistogramItems, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationBucketHistogramItems)
+		if raw == nil {
+			return agg, true
+		}
+		if err := json.Unmarshal(*raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// DateHistogram returns date histogram aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-datehistogram-aggregation.html
+func (a Aggregations) DateHistogram(name string) (*AggregationBucketHistogramItems, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationBucketHistogramItems)
+		if raw == nil {
+			return agg, true
+		}
+		if err := json.Unmarshal(*raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// GeoBounds returns geo-bounds aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-geobounds-aggregation.html
+func (a Aggregations) GeoBounds(name string) (*AggregationGeoBoundsMetric, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationGeoBoundsMetric)
+		if raw == nil {
+			return agg, true
+		}
+		if err := json.Unmarshal(*raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// GeoHash returns geo-hash aggregation results.
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-geohashgrid-aggregation.html
+func (a Aggregations) GeoHash(name string) (*AggregationBucketKeyItems, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationBucketKeyItems)
+		if raw == nil {
+			return agg, true
+		}
+		if err := json.Unmarshal(*raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
+// GeoDistance returns geo distance aggregation results.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-geodistance-aggregation.html
+func (a Aggregations) GeoDistance(name string) (*AggregationBucketRangeItems, bool) {
+	if raw, found := a[name]; found {
+		agg := new(AggregationBucketRangeItems)
+		if raw == nil {
+			return agg, true
+		}
+		if err := json.Unmarshal(*raw, agg); err == nil {
+			return agg, true
+		}
+	}
+	return nil, false
+}
+
// -- Single value metric --

// AggregationValueMetric is a single-value metric, returned e.g. by a
// Min or Max aggregation.
type AggregationValueMetric struct {
	Aggregations

	// Value is the metric's single value; nil when the response
	// contained no (or a null) "value" field.
	Value *float64 //`json:"value"`
}

// UnmarshalJSON decodes JSON data and initializes an AggregationValueMetric structure.
// The full raw map is retained in the embedded Aggregations so that any
// sub-aggregations remain reachable through the typed accessors. The error
// from the per-field Unmarshal is deliberately ignored: a malformed "value"
// simply leaves Value nil.
func (a *AggregationValueMetric) UnmarshalJSON(data []byte) error {
	var aggs map[string]*json.RawMessage
	if err := json.Unmarshal(data, &aggs); err != nil {
		return err
	}
	if v, ok := aggs["value"]; ok && v != nil {
		json.Unmarshal(*v, &a.Value)
	}
	a.Aggregations = aggs
	return nil
}
+
+// -- Stats metric --
+
+// AggregationStatsMetric is a multi-value metric, returned by a Stats aggregation.
+type AggregationStatsMetric struct {
+	Aggregations
+
+	Count int64    // `json:"count"`
+	Min   *float64 //`json:"min,omitempty"`
+	Max   *float64 //`json:"max,omitempty"`
+	Avg   *float64 //`json:"avg,omitempty"`
+	Sum   *float64 //`json:"sum,omitempty"`
+}
+
+// UnmarshalJSON decodes JSON data and initializes an AggregationStatsMetric structure.
+func (a *AggregationStatsMetric) UnmarshalJSON(data []byte) error {
+	var aggs map[string]*json.RawMessage
+	if err := json.Unmarshal(data, &aggs); err != nil {
+		return err
+	}
+	if v, ok := aggs["count"]; ok && v != nil {
+		json.Unmarshal(*v, &a.Count)
+	}
+	if v, ok := aggs["min"]; ok && v != nil {
+		json.Unmarshal(*v, &a.Min)
+	}
+	if v, ok := aggs["max"]; ok && v != nil {
+		json.Unmarshal(*v, &a.Max)
+	}
+	if v, ok := aggs["avg"]; ok && v != nil {
+		json.Unmarshal(*v, &a.Avg)
+	}
+	if v, ok := aggs["sum"]; ok && v != nil {
+		json.Unmarshal(*v, &a.Sum)
+	}
+	a.Aggregations = aggs
+	return nil
+}
+
// -- Extended stats metric --

// AggregationExtendedStatsMetric is a multi-value metric, returned by an ExtendedStats aggregation.
// Pointer fields stay nil when the corresponding key is absent or null.
type AggregationExtendedStatsMetric struct {
	Aggregations

	Count        int64    // `json:"count"`
	Min          *float64 //`json:"min,omitempty"`
	Max          *float64 //`json:"max,omitempty"`
	Avg          *float64 //`json:"avg,omitempty"`
	Sum          *float64 //`json:"sum,omitempty"`
	SumOfSquares *float64 //`json:"sum_of_squares,omitempty"`
	Variance     *float64 //`json:"variance,omitempty"`
	StdDeviation *float64 //`json:"std_deviation,omitempty"`
}

// UnmarshalJSON decodes JSON data and initializes an AggregationExtendedStatsMetric structure.
// The raw map is retained in the embedded Aggregations so sub-aggregations stay
// reachable; per-field decode errors are deliberately ignored, leaving the field
// at its zero value.
func (a *AggregationExtendedStatsMetric) UnmarshalJSON(data []byte) error {
	var aggs map[string]*json.RawMessage
	if err := json.Unmarshal(data, &aggs); err != nil {
		return err
	}
	if v, ok := aggs["count"]; ok && v != nil {
		json.Unmarshal(*v, &a.Count)
	}
	if v, ok := aggs["min"]; ok && v != nil {
		json.Unmarshal(*v, &a.Min)
	}
	if v, ok := aggs["max"]; ok && v != nil {
		json.Unmarshal(*v, &a.Max)
	}
	if v, ok := aggs["avg"]; ok && v != nil {
		json.Unmarshal(*v, &a.Avg)
	}
	if v, ok := aggs["sum"]; ok && v != nil {
		json.Unmarshal(*v, &a.Sum)
	}
	if v, ok := aggs["sum_of_squares"]; ok && v != nil {
		json.Unmarshal(*v, &a.SumOfSquares)
	}
	if v, ok := aggs["variance"]; ok && v != nil {
		json.Unmarshal(*v, &a.Variance)
	}
	if v, ok := aggs["std_deviation"]; ok && v != nil {
		json.Unmarshal(*v, &a.StdDeviation)
	}
	a.Aggregations = aggs
	return nil
}
+
// -- Percentiles metric --

// AggregationPercentilesMetric is a multi-value metric, returned by a Percentiles aggregation.
type AggregationPercentilesMetric struct {
	Aggregations

	// Values maps each requested percentile (as a string key,
	// e.g. "50.0" — TODO confirm key format against the response)
	// to its computed value.
	Values map[string]float64 // `json:"values"`
}

// UnmarshalJSON decodes JSON data and initializes an AggregationPercentilesMetric structure.
// The raw map is retained in the embedded Aggregations; a decode error on
// "values" is deliberately ignored, leaving Values nil.
func (a *AggregationPercentilesMetric) UnmarshalJSON(data []byte) error {
	var aggs map[string]*json.RawMessage
	if err := json.Unmarshal(data, &aggs); err != nil {
		return err
	}
	if v, ok := aggs["values"]; ok && v != nil {
		json.Unmarshal(*v, &a.Values)
	}
	a.Aggregations = aggs
	return nil
}
+
// -- Top-hits metric --

// AggregationTopHitsMetric is a metric returned by a TopHits aggregation.
type AggregationTopHitsMetric struct {
	Aggregations

	// Hits is always non-nil after UnmarshalJSON: it is initialized to an
	// empty SearchHits even when the response carries no "hits" field.
	Hits *SearchHits //`json:"hits"`
}

// UnmarshalJSON decodes JSON data and initializes an AggregationTopHitsMetric structure.
// The raw map is retained in the embedded Aggregations; a decode error on
// "hits" is deliberately ignored, leaving Hits at the freshly allocated
// empty value.
func (a *AggregationTopHitsMetric) UnmarshalJSON(data []byte) error {
	var aggs map[string]*json.RawMessage
	if err := json.Unmarshal(data, &aggs); err != nil {
		return err
	}
	a.Aggregations = aggs
	a.Hits = new(SearchHits)
	if v, ok := aggs["hits"]; ok && v != nil {
		json.Unmarshal(*v, &a.Hits)
	}
	return nil
}
+
// -- Geo-bounds metric --

// AggregationGeoBoundsMetric is a metric as returned by a GeoBounds aggregation.
// The bounding box is described by its top-left and bottom-right corners.
type AggregationGeoBoundsMetric struct {
	Aggregations

	Bounds struct {
		TopLeft struct {
			Latitude  float64 `json:"lat"`
			Longitude float64 `json:"lon"`
		} `json:"top_left"`
		BottomRight struct {
			Latitude  float64 `json:"lat"`
			Longitude float64 `json:"lon"`
		} `json:"bottom_right"`
	} `json:"bounds"`
}

// UnmarshalJSON decodes JSON data and initializes an AggregationGeoBoundsMetric structure.
// The raw map is retained in the embedded Aggregations; a decode error on
// "bounds" is deliberately ignored, leaving Bounds at its zero value.
func (a *AggregationGeoBoundsMetric) UnmarshalJSON(data []byte) error {
	var aggs map[string]*json.RawMessage
	if err := json.Unmarshal(data, &aggs); err != nil {
		return err
	}
	if v, ok := aggs["bounds"]; ok && v != nil {
		json.Unmarshal(*v, &a.Bounds)
	}
	a.Aggregations = aggs
	return nil
}
+
// -- Single bucket --

// AggregationSingleBucket is a single bucket, returned e.g. via an aggregation of type Global.
type AggregationSingleBucket struct {
	Aggregations

	// DocCount is the number of documents in this bucket.
	DocCount int64 // `json:"doc_count"`
}

// UnmarshalJSON decodes JSON data and initializes an AggregationSingleBucket structure.
// The raw map is retained in the embedded Aggregations so sub-aggregations stay
// reachable; a decode error on "doc_count" is deliberately ignored.
func (a *AggregationSingleBucket) UnmarshalJSON(data []byte) error {
	var aggs map[string]*json.RawMessage
	if err := json.Unmarshal(data, &aggs); err != nil {
		return err
	}
	if v, ok := aggs["doc_count"]; ok && v != nil {
		json.Unmarshal(*v, &a.DocCount)
	}
	a.Aggregations = aggs
	return nil
}
+
// -- Bucket range items --

// AggregationBucketRangeItems is a bucket aggregation that is e.g. returned
// with a range aggregation.
type AggregationBucketRangeItems struct {
	Aggregations

	DocCountErrorUpperBound int64                         //`json:"doc_count_error_upper_bound"`
	SumOfOtherDocCount      int64                         //`json:"sum_other_doc_count"`
	Buckets                 []*AggregationBucketRangeItem //`json:"buckets"`
}

// UnmarshalJSON decodes JSON data and initializes an AggregationBucketRangeItems structure.
// The raw map is retained in the embedded Aggregations; per-field decode errors
// are deliberately ignored, leaving the affected field at its zero value.
func (a *AggregationBucketRangeItems) UnmarshalJSON(data []byte) error {
	var aggs map[string]*json.RawMessage
	if err := json.Unmarshal(data, &aggs); err != nil {
		return err
	}
	if v, ok := aggs["doc_count_error_upper_bound"]; ok && v != nil {
		json.Unmarshal(*v, &a.DocCountErrorUpperBound)
	}
	if v, ok := aggs["sum_other_doc_count"]; ok && v != nil {
		json.Unmarshal(*v, &a.SumOfOtherDocCount)
	}
	if v, ok := aggs["buckets"]; ok && v != nil {
		json.Unmarshal(*v, &a.Buckets)
	}
	a.Aggregations = aggs
	return nil
}

// AggregationBucketRangeItem is a single bucket of an AggregationBucketRangeItems structure.
// From/To are nil for open-ended ranges; the *AsString variants carry the
// formatted representation when the server returns one.
type AggregationBucketRangeItem struct {
	Aggregations

	Key          string   //`json:"key"`
	DocCount     int64    //`json:"doc_count"`
	From         *float64 //`json:"from"`
	FromAsString string   //`json:"from_as_string"`
	To           *float64 //`json:"to"`
	ToAsString   string   //`json:"to_as_string"`
}

// UnmarshalJSON decodes JSON data and initializes an AggregationBucketRangeItem structure.
// The raw map is retained in the embedded Aggregations; per-field decode errors
// are deliberately ignored.
func (a *AggregationBucketRangeItem) UnmarshalJSON(data []byte) error {
	var aggs map[string]*json.RawMessage
	if err := json.Unmarshal(data, &aggs); err != nil {
		return err
	}
	if v, ok := aggs["key"]; ok && v != nil {
		json.Unmarshal(*v, &a.Key)
	}
	if v, ok := aggs["doc_count"]; ok && v != nil {
		json.Unmarshal(*v, &a.DocCount)
	}
	if v, ok := aggs["from"]; ok && v != nil {
		json.Unmarshal(*v, &a.From)
	}
	if v, ok := aggs["from_as_string"]; ok && v != nil {
		json.Unmarshal(*v, &a.FromAsString)
	}
	if v, ok := aggs["to"]; ok && v != nil {
		json.Unmarshal(*v, &a.To)
	}
	if v, ok := aggs["to_as_string"]; ok && v != nil {
		json.Unmarshal(*v, &a.ToAsString)
	}
	a.Aggregations = aggs
	return nil
}
+
// -- Bucket key items --

// AggregationBucketKeyItems is a bucket aggregation that is e.g. returned
// with a terms aggregation.
type AggregationBucketKeyItems struct {
	Aggregations

	DocCountErrorUpperBound int64                       //`json:"doc_count_error_upper_bound"`
	SumOfOtherDocCount      int64                       //`json:"sum_other_doc_count"`
	Buckets                 []*AggregationBucketKeyItem //`json:"buckets"`
}

// UnmarshalJSON decodes JSON data and initializes an AggregationBucketKeyItems structure.
// The raw map is retained in the embedded Aggregations; per-field decode errors
// are deliberately ignored, leaving the affected field at its zero value.
func (a *AggregationBucketKeyItems) UnmarshalJSON(data []byte) error {
	var aggs map[string]*json.RawMessage
	if err := json.Unmarshal(data, &aggs); err != nil {
		return err
	}
	if v, ok := aggs["doc_count_error_upper_bound"]; ok && v != nil {
		json.Unmarshal(*v, &a.DocCountErrorUpperBound)
	}
	if v, ok := aggs["sum_other_doc_count"]; ok && v != nil {
		json.Unmarshal(*v, &a.SumOfOtherDocCount)
	}
	if v, ok := aggs["buckets"]; ok && v != nil {
		json.Unmarshal(*v, &a.Buckets)
	}
	a.Aggregations = aggs
	return nil
}

// AggregationBucketKeyItem is a single bucket of an AggregationBucketKeyItems structure.
type AggregationBucketKeyItem struct {
	Aggregations

	// Key is the bucket key in its generic decoded form.
	Key interface{} //`json:"key"`
	// KeyNumber additionally holds the key as a json.Number so numeric
	// keys keep full precision; for non-numeric keys the decode fails
	// silently and KeyNumber stays empty.
	KeyNumber json.Number
	DocCount  int64 //`json:"doc_count"`
}

// UnmarshalJSON decodes JSON data and initializes an AggregationBucketKeyItem structure.
// A json.Decoder with UseNumber is used so that numeric keys are not coerced
// to float64 before being inspected. Per-field decode errors are deliberately
// ignored; the raw map is retained in the embedded Aggregations.
func (a *AggregationBucketKeyItem) UnmarshalJSON(data []byte) error {
	var aggs map[string]*json.RawMessage
	dec := json.NewDecoder(bytes.NewReader(data))
	dec.UseNumber()
	if err := dec.Decode(&aggs); err != nil {
		return err
	}
	if v, ok := aggs["key"]; ok && v != nil {
		json.Unmarshal(*v, &a.Key)
		json.Unmarshal(*v, &a.KeyNumber)
	}
	if v, ok := aggs["doc_count"]; ok && v != nil {
		json.Unmarshal(*v, &a.DocCount)
	}
	a.Aggregations = aggs
	return nil
}
+
// -- Bucket types for significant terms --

// AggregationBucketSignificantTerms is a bucket aggregation returned
// with a significant terms aggregation.
type AggregationBucketSignificantTerms struct {
	Aggregations

	DocCount int64                               //`json:"doc_count"`
	Buckets  []*AggregationBucketSignificantTerm //`json:"buckets"`
}

// UnmarshalJSON decodes JSON data and initializes an AggregationBucketSignificantTerms structure.
// The raw map is retained in the embedded Aggregations; per-field decode errors
// are deliberately ignored.
func (a *AggregationBucketSignificantTerms) UnmarshalJSON(data []byte) error {
	var aggs map[string]*json.RawMessage
	if err := json.Unmarshal(data, &aggs); err != nil {
		return err
	}
	if v, ok := aggs["doc_count"]; ok && v != nil {
		json.Unmarshal(*v, &a.DocCount)
	}
	if v, ok := aggs["buckets"]; ok && v != nil {
		json.Unmarshal(*v, &a.Buckets)
	}
	a.Aggregations = aggs
	return nil
}

// AggregationBucketSignificantTerm is a single bucket of an AggregationBucketSignificantTerms structure.
type AggregationBucketSignificantTerm struct {
	Aggregations

	Key      string  //`json:"key"`
	DocCount int64   //`json:"doc_count"`
	BgCount  int64   //`json:"bg_count"`  // background count
	Score    float64 //`json:"score"`     // significance score
}

// UnmarshalJSON decodes JSON data and initializes an AggregationBucketSignificantTerm structure.
// The raw map is retained in the embedded Aggregations; per-field decode errors
// are deliberately ignored.
func (a *AggregationBucketSignificantTerm) UnmarshalJSON(data []byte) error {
	var aggs map[string]*json.RawMessage
	if err := json.Unmarshal(data, &aggs); err != nil {
		return err
	}
	if v, ok := aggs["key"]; ok && v != nil {
		json.Unmarshal(*v, &a.Key)
	}
	if v, ok := aggs["doc_count"]; ok && v != nil {
		json.Unmarshal(*v, &a.DocCount)
	}
	if v, ok := aggs["bg_count"]; ok && v != nil {
		json.Unmarshal(*v, &a.BgCount)
	}
	if v, ok := aggs["score"]; ok && v != nil {
		json.Unmarshal(*v, &a.Score)
	}
	a.Aggregations = aggs
	return nil
}
+
// -- Bucket filters --

// AggregationBucketFilters is a multi-bucket aggregation that is returned
// with a filters aggregation. Depending on whether the request used
// anonymous or named filters, "buckets" arrives as a JSON array (filling
// Buckets) or as a JSON object (filling NamedBuckets); the decode into the
// non-matching field fails silently and leaves it nil.
type AggregationBucketFilters struct {
	Aggregations

	Buckets      []*AggregationBucketKeyItem          //`json:"buckets"`
	NamedBuckets map[string]*AggregationBucketKeyItem //`json:"buckets"`
}

// UnmarshalJSON decodes JSON data and initializes an AggregationBucketFilters structure.
// Both decodes of "buckets" are attempted; exactly one can succeed for a
// given response shape, and errors are deliberately ignored.
func (a *AggregationBucketFilters) UnmarshalJSON(data []byte) error {
	var aggs map[string]*json.RawMessage
	if err := json.Unmarshal(data, &aggs); err != nil {
		return err
	}
	if v, ok := aggs["buckets"]; ok && v != nil {
		json.Unmarshal(*v, &a.Buckets)
		json.Unmarshal(*v, &a.NamedBuckets)
	}
	a.Aggregations = aggs
	return nil
}
+
// -- Bucket histogram items --

// AggregationBucketHistogramItems is a bucket aggregation that is returned
// with a date histogram aggregation.
type AggregationBucketHistogramItems struct {
	Aggregations

	Buckets []*AggregationBucketHistogramItem //`json:"buckets"`
}

// UnmarshalJSON decodes JSON data and initializes an AggregationBucketHistogramItems structure.
// The raw map is retained in the embedded Aggregations; a decode error on
// "buckets" is deliberately ignored, leaving Buckets nil.
func (a *AggregationBucketHistogramItems) UnmarshalJSON(data []byte) error {
	var aggs map[string]*json.RawMessage
	if err := json.Unmarshal(data, &aggs); err != nil {
		return err
	}
	if v, ok := aggs["buckets"]; ok && v != nil {
		json.Unmarshal(*v, &a.Buckets)
	}
	a.Aggregations = aggs
	return nil
}

// AggregationBucketHistogramItem is a single bucket of an AggregationBucketHistogramItems structure.
type AggregationBucketHistogramItem struct {
	Aggregations

	// Key is the numeric bucket key (for date histograms, presumably
	// milliseconds since epoch — confirm against the response).
	Key int64 //`json:"key"`
	// KeyAsString is the formatted key when the server returns one; nil otherwise.
	KeyAsString *string //`json:"key_as_string"`
	DocCount    int64   //`json:"doc_count"`
}

// UnmarshalJSON decodes JSON data and initializes an AggregationBucketHistogramItem structure.
// The raw map is retained in the embedded Aggregations; per-field decode errors
// are deliberately ignored.
func (a *AggregationBucketHistogramItem) UnmarshalJSON(data []byte) error {
	var aggs map[string]*json.RawMessage
	if err := json.Unmarshal(data, &aggs); err != nil {
		return err
	}
	if v, ok := aggs["key"]; ok && v != nil {
		json.Unmarshal(*v, &a.Key)
	}
	if v, ok := aggs["key_as_string"]; ok && v != nil {
		json.Unmarshal(*v, &a.KeyAsString)
	}
	if v, ok := aggs["doc_count"]; ok && v != nil {
		json.Unmarshal(*v, &a.DocCount)
	}
	a.Aggregations = aggs
	return nil
}

109 additions, 0 deletions:
github.com/olivere/elastic/search_aggs_avg.go

@@ -0,0 +1,109 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
// AvgAggregation is a single-value metrics aggregation that computes
// the average of numeric values that are extracted from the
// aggregated documents. These values can be extracted either from
// specific numeric fields in the documents, or be generated by
// a provided script.
// The zero-value fields are omitted from the generated request body
// (see Source), so an unset option never appears in the JSON.
// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-avg-aggregation.html
type AvgAggregation struct {
	field           string
	script          string
	scriptFile      string
	lang            string
	format          string
	params          map[string]interface{}
	subAggregations map[string]Aggregation
}

// NewAvgAggregation creates a new AvgAggregation with initialized
// (empty) params and sub-aggregation maps. The builder methods below
// use value receivers and return the modified copy, so calls chain.
func NewAvgAggregation() AvgAggregation {
	a := AvgAggregation{
		params:          make(map[string]interface{}),
		subAggregations: make(map[string]Aggregation),
	}
	return a
}

// Field sets the document field to average over.
func (a AvgAggregation) Field(field string) AvgAggregation {
	a.field = field
	return a
}

// Script sets an inline script that generates the values to average.
func (a AvgAggregation) Script(script string) AvgAggregation {
	a.script = script
	return a
}

// ScriptFile sets the name of a script file to use instead of an inline script.
func (a AvgAggregation) ScriptFile(scriptFile string) AvgAggregation {
	a.scriptFile = scriptFile
	return a
}

// Lang sets the script language.
func (a AvgAggregation) Lang(lang string) AvgAggregation {
	a.lang = lang
	return a
}

// Format sets the format of the returned value.
func (a AvgAggregation) Format(format string) AvgAggregation {
	a.format = format
	return a
}

// Param adds a named script parameter.
func (a AvgAggregation) Param(name string, value interface{}) AvgAggregation {
	a.params[name] = value
	return a
}

// SubAggregation registers a sub-aggregation under the given name.
func (a AvgAggregation) SubAggregation(name string, subAggregation Aggregation) AvgAggregation {
	a.subAggregations[name] = subAggregation
	return a
}
+
+func (a AvgAggregation) Source() interface{} {
+	// Example:
+	//	{
+	//    "aggs" : {
+	//      "avg_grade" : { "avg" : { "field" : "grade" } }
+	//    }
+	//	}
+	// This method returns only the { "avg" : { "field" : "grade" } } part.
+
+	source := make(map[string]interface{})
+	opts := make(map[string]interface{})
+	source["avg"] = opts
+
+	// ValuesSourceAggregationBuilder
+	if a.field != "" {
+		opts["field"] = a.field
+	}
+	if a.script != "" {
+		opts["script"] = a.script
+	}
+	if a.scriptFile != "" {
+		opts["script_file"] = a.scriptFile
+	}
+	if a.lang != "" {
+		opts["lang"] = a.lang
+	}
+	if a.format != "" {
+		opts["format"] = a.format
+	}
+	if len(a.params) > 0 {
+		opts["params"] = a.params
+	}
+
+	// AggregationBuilder (SubAggregations)
+	if len(a.subAggregations) > 0 {
+		aggsMap := make(map[string]interface{})
+		source["aggregations"] = aggsMap
+		for name, aggregate := range a.subAggregations {
+			aggsMap[name] = aggregate.Source()
+		}
+	}
+
+	return source
+}

128 additions, 0 deletions:
github.com/olivere/elastic/search_aggs_cardinality.go

@@ -0,0 +1,128 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// CardinalityAggregation is a single-value metrics aggregation that
+// calculates an approximate count of distinct values.
+// Values can be extracted either from specific fields in the document
+// or generated by a script.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-cardinality-aggregation.html
+type CardinalityAggregation struct {
+	field              string
+	script             string
+	scriptFile         string
+	lang               string
+	format             string
+	params             map[string]interface{}
+	subAggregations    map[string]Aggregation
+	precisionThreshold *int64
+	rehash             *bool
+}
+
+func NewCardinalityAggregation() CardinalityAggregation {
+	a := CardinalityAggregation{
+		params:          make(map[string]interface{}),
+		subAggregations: make(map[string]Aggregation),
+	}
+	return a
+}
+
+func (a CardinalityAggregation) Field(field string) CardinalityAggregation {
+	a.field = field
+	return a
+}
+
+func (a CardinalityAggregation) Script(script string) CardinalityAggregation {
+	a.script = script
+	return a
+}
+
+func (a CardinalityAggregation) ScriptFile(scriptFile string) CardinalityAggregation {
+	a.scriptFile = scriptFile
+	return a
+}
+
+func (a CardinalityAggregation) Lang(lang string) CardinalityAggregation {
+	a.lang = lang
+	return a
+}
+
+func (a CardinalityAggregation) Format(format string) CardinalityAggregation {
+	a.format = format
+	return a
+}
+
+func (a CardinalityAggregation) Param(name string, value interface{}) CardinalityAggregation {
+	a.params[name] = value
+	return a
+}
+
+func (a CardinalityAggregation) SubAggregation(name string, subAggregation Aggregation) CardinalityAggregation {
+	a.subAggregations[name] = subAggregation
+	return a
+}
+
+func (a CardinalityAggregation) PrecisionThreshold(threshold int64) CardinalityAggregation {
+	a.precisionThreshold = &threshold
+	return a
+}
+
+func (a CardinalityAggregation) Rehash(rehash bool) CardinalityAggregation {
+	a.rehash = &rehash
+	return a
+}
+
+func (a CardinalityAggregation) Source() interface{} {
+	// Example:
+	//	{
+	//    "aggs" : {
+	//      "author_count" : {
+	//        "cardinality" : { "field" : "author" }
+	//      }
+	//    }
+	//	}
+	// This method returns only the "cardinality" : { "field" : "author" } part.
+
+	source := make(map[string]interface{})
+	opts := make(map[string]interface{})
+	source["cardinality"] = opts
+
+	// ValuesSourceAggregationBuilder
+	if a.field != "" {
+		opts["field"] = a.field
+	}
+	if a.script != "" {
+		opts["script"] = a.script
+	}
+	if a.scriptFile != "" {
+		opts["script_file"] = a.scriptFile
+	}
+	if a.lang != "" {
+		opts["lang"] = a.lang
+	}
+	if a.format != "" {
+		opts["format"] = a.format
+	}
+	if len(a.params) > 0 {
+		opts["params"] = a.params
+	}
+	if a.precisionThreshold != nil {
+		opts["precision_threshold"] = *a.precisionThreshold
+	}
+	if a.rehash != nil {
+		opts["rehash"] = *a.rehash
+	}
+
+	// AggregationBuilder (SubAggregations)
+	if len(a.subAggregations) > 0 {
+		aggsMap := make(map[string]interface{})
+		source["aggregations"] = aggsMap
+		for name, aggregate := range a.subAggregations {
+			aggsMap[name] = aggregate.Source()
+		}
+	}
+
+	return source
+}

+ 57 - 0
github.com/olivere/elastic/search_aggs_children.go

@@ -0,0 +1,57 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// ChildrenAggregation is a special single bucket aggregation that enables
+// aggregating from buckets on parent document types to buckets on child documents.
+// It is available from 1.4.0.Beta1 upwards.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-children-aggregation.html
+type ChildrenAggregation struct {
+	typ             string
+	subAggregations map[string]Aggregation
+}
+
+func NewChildrenAggregation() ChildrenAggregation {
+	a := ChildrenAggregation{
+		subAggregations: make(map[string]Aggregation),
+	}
+	return a
+}
+
+func (a ChildrenAggregation) Type(typ string) ChildrenAggregation {
+	a.typ = typ
+	return a
+}
+
+func (a ChildrenAggregation) SubAggregation(name string, subAggregation Aggregation) ChildrenAggregation {
+	a.subAggregations[name] = subAggregation
+	return a
+}
+
+func (a ChildrenAggregation) Source() interface{} {
+	// Example:
+	//	{
+	//    "aggs" : {
+	//      "to-answers" : {
+	//        "type" : "answer"
+	//      }
+	//    }
+	//	}
+	// This method returns only the { "type" : ... } part.
+
+	source := make(map[string]interface{})
+	source["type"] = a.typ
+
+	// AggregationBuilder (SubAggregations)
+	if len(a.subAggregations) > 0 {
+		aggsMap := make(map[string]interface{})
+		source["aggregations"] = aggsMap
+		for name, aggregate := range a.subAggregations {
+			aggsMap[name] = aggregate.Source()
+		}
+	}
+
+	return source
+}

+ 303 - 0
github.com/olivere/elastic/search_aggs_date_histogram.go

@@ -0,0 +1,303 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
// DateHistogramAggregation is a multi-bucket aggregation similar to the
// histogram except it can only be applied on date values.
// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-datehistogram-aggregation.html
type DateHistogramAggregation struct {
	// Value source: either a document field or a script generating dates.
	field           string
	script          string
	scriptFile      string
	lang            string
	params          map[string]interface{}
	subAggregations map[string]Aggregation

	// Histogram options. Pointer fields are tri-state: nil means
	// "not set, let the server use its default".
	interval                   string
	order                      string
	orderAsc                   bool
	minDocCount                *int64
	extendedBoundsMin          interface{}
	extendedBoundsMax          interface{}
	preZone                    string
	postZone                   string
	preZoneAdjustLargeInterval *bool
	format                     string
	preOffset                  int64
	postOffset                 int64
	factor                     *float32
}

// NewDateHistogramAggregation creates a new DateHistogramAggregation
// with empty params and sub-aggregation maps.
func NewDateHistogramAggregation() DateHistogramAggregation {
	a := DateHistogramAggregation{
		params:          make(map[string]interface{}),
		subAggregations: make(map[string]Aggregation),
	}
	return a
}

// Field sets the date field to bucket on.
func (a DateHistogramAggregation) Field(field string) DateHistogramAggregation {
	a.field = field
	return a
}

// Script sets an inline script that generates the date values.
func (a DateHistogramAggregation) Script(script string) DateHistogramAggregation {
	a.script = script
	return a
}

// ScriptFile sets the name of a script file that generates the date values.
func (a DateHistogramAggregation) ScriptFile(scriptFile string) DateHistogramAggregation {
	a.scriptFile = scriptFile
	return a
}

// Lang sets the scripting language used for Script/ScriptFile.
func (a DateHistogramAggregation) Lang(lang string) DateHistogramAggregation {
	a.lang = lang
	return a
}

// Param adds a named parameter made available to the script.
func (a DateHistogramAggregation) Param(name string, value interface{}) DateHistogramAggregation {
	a.params[name] = value
	return a
}

// SubAggregation nests another aggregation under the given name.
func (a DateHistogramAggregation) SubAggregation(name string, subAggregation Aggregation) DateHistogramAggregation {
	a.subAggregations[name] = subAggregation
	return a
}

// Interval sets the bucket interval.
// Allowed values are: "year", "quarter", "month", "week", "day",
// "hour", "minute". It also supports time settings like "1.5h"
// (up to "w" for weeks).
func (a DateHistogramAggregation) Interval(interval string) DateHistogramAggregation {
	a.interval = interval
	return a
}

// Order specifies the sort order. Valid values for order are:
// "_key", "_count", a sub-aggregation name, or a sub-aggregation name
// with a metric.
func (a DateHistogramAggregation) Order(order string, asc bool) DateHistogramAggregation {
	a.order = order
	a.orderAsc = asc
	return a
}

// OrderByCount sorts buckets by their document count.
func (a DateHistogramAggregation) OrderByCount(asc bool) DateHistogramAggregation {
	// "order" : { "_count" : "asc" }
	a.order = "_count"
	a.orderAsc = asc
	return a
}

// OrderByCountAsc sorts buckets by ascending document count.
func (a DateHistogramAggregation) OrderByCountAsc() DateHistogramAggregation {
	return a.OrderByCount(true)
}

// OrderByCountDesc sorts buckets by descending document count.
func (a DateHistogramAggregation) OrderByCountDesc() DateHistogramAggregation {
	return a.OrderByCount(false)
}

// OrderByKey sorts buckets by their key.
func (a DateHistogramAggregation) OrderByKey(asc bool) DateHistogramAggregation {
	// "order" : { "_key" : "asc" }
	a.order = "_key"
	a.orderAsc = asc
	return a
}

// OrderByKeyAsc sorts buckets by ascending key.
func (a DateHistogramAggregation) OrderByKeyAsc() DateHistogramAggregation {
	return a.OrderByKey(true)
}

// OrderByKeyDesc sorts buckets by descending key.
func (a DateHistogramAggregation) OrderByKeyDesc() DateHistogramAggregation {
	return a.OrderByKey(false)
}

// OrderByAggregation creates a bucket ordering strategy which sorts buckets
// based on a single-valued calc get.
func (a DateHistogramAggregation) OrderByAggregation(aggName string, asc bool) DateHistogramAggregation {
	// {
	//     "aggs" : {
	//         "genders" : {
	//             "terms" : {
	//                 "field" : "gender",
	//                 "order" : { "avg_height" : "desc" }
	//             },
	//             "aggs" : {
	//                 "avg_height" : { "avg" : { "field" : "height" } }
	//             }
	//         }
	//     }
	// }
	a.order = aggName
	a.orderAsc = asc
	return a
}

// OrderByAggregationAndMetric creates a bucket ordering strategy which
// sorts buckets based on a multi-valued calc get.
func (a DateHistogramAggregation) OrderByAggregationAndMetric(aggName, metric string, asc bool) DateHistogramAggregation {
	// {
	//     "aggs" : {
	//         "genders" : {
	//             "terms" : {
	//                 "field" : "gender",
	//                 "order" : { "height_stats.avg" : "desc" }
	//             },
	//             "aggs" : {
	//                 "height_stats" : { "stats" : { "field" : "height" } }
	//             }
	//         }
	//     }
	// }
	a.order = aggName + "." + metric
	a.orderAsc = asc
	return a
}

// MinDocCount sets the minimum number of documents a bucket must
// contain to be returned.
func (a DateHistogramAggregation) MinDocCount(minDocCount int64) DateHistogramAggregation {
	a.minDocCount = &minDocCount
	return a
}

// PreZone sets the time zone applied before bucketing.
func (a DateHistogramAggregation) PreZone(preZone string) DateHistogramAggregation {
	a.preZone = preZone
	return a
}

// PostZone sets the time zone applied after bucketing.
func (a DateHistogramAggregation) PostZone(postZone string) DateHistogramAggregation {
	a.postZone = postZone
	return a
}

// PreZoneAdjustLargeInterval toggles pre-zone adjustment for intervals
// of a day or larger.
func (a DateHistogramAggregation) PreZoneAdjustLargeInterval(preZoneAdjustLargeInterval bool) DateHistogramAggregation {
	a.preZoneAdjustLargeInterval = &preZoneAdjustLargeInterval
	return a
}

// PreOffset sets an offset applied to values before bucketing.
// A value of 0 is treated as "not set" and is omitted from the source.
func (a DateHistogramAggregation) PreOffset(preOffset int64) DateHistogramAggregation {
	a.preOffset = preOffset
	return a
}

// PostOffset sets an offset applied to values after bucketing.
// A value of 0 is treated as "not set" and is omitted from the source.
func (a DateHistogramAggregation) PostOffset(postOffset int64) DateHistogramAggregation {
	a.postOffset = postOffset
	return a
}

// Factor sets a multiplication factor applied to the values.
func (a DateHistogramAggregation) Factor(factor float32) DateHistogramAggregation {
	a.factor = &factor
	return a
}

// Format sets the date format used for bucket keys in the response.
func (a DateHistogramAggregation) Format(format string) DateHistogramAggregation {
	a.format = format
	return a
}

// ExtendedBoundsMin accepts int, int64, string, or time.Time values.
func (a DateHistogramAggregation) ExtendedBoundsMin(min interface{}) DateHistogramAggregation {
	a.extendedBoundsMin = min
	return a
}

// ExtendedBoundsMax accepts int, int64, string, or time.Time values.
func (a DateHistogramAggregation) ExtendedBoundsMax(max interface{}) DateHistogramAggregation {
	a.extendedBoundsMax = max
	return a
}

// Source returns the JSON-serializable body of the aggregation.
func (a DateHistogramAggregation) Source() interface{} {
	// Example:
	// {
	//     "aggs" : {
	//         "articles_over_time" : {
	//             "date_histogram" : {
	//                 "field" : "date",
	//                 "interval" : "month"
	//             }
	//         }
	//     }
	// }
	//
	// This method returns only the { "date_histogram" : { ... } } part.

	source := make(map[string]interface{})
	opts := make(map[string]interface{})
	source["date_histogram"] = opts

	// ValuesSourceAggregationBuilder: emit only the settings that were set.
	if a.field != "" {
		opts["field"] = a.field
	}
	if a.script != "" {
		opts["script"] = a.script
	}
	if a.scriptFile != "" {
		opts["script_file"] = a.scriptFile
	}
	if a.lang != "" {
		opts["lang"] = a.lang
	}
	if len(a.params) > 0 {
		opts["params"] = a.params
	}

	// interval is always emitted, even when empty.
	opts["interval"] = a.interval
	if a.minDocCount != nil {
		opts["min_doc_count"] = *a.minDocCount
	}
	if a.order != "" {
		o := make(map[string]interface{})
		if a.orderAsc {
			o[a.order] = "asc"
		} else {
			o[a.order] = "desc"
		}
		opts["order"] = o
	}
	if a.preZone != "" {
		opts["pre_zone"] = a.preZone
	}
	if a.postZone != "" {
		opts["post_zone"] = a.postZone
	}
	if a.preZoneAdjustLargeInterval != nil {
		opts["pre_zone_adjust_large_interval"] = *a.preZoneAdjustLargeInterval
	}
	if a.preOffset != 0 {
		opts["pre_offset"] = a.preOffset
	}
	if a.postOffset != 0 {
		opts["post_offset"] = a.postOffset
	}
	if a.factor != nil {
		opts["factor"] = *a.factor
	}
	if a.format != "" {
		opts["format"] = a.format
	}
	// extended_bounds is emitted when either bound is set.
	if a.extendedBoundsMin != nil || a.extendedBoundsMax != nil {
		bounds := make(map[string]interface{})
		if a.extendedBoundsMin != nil {
			bounds["min"] = a.extendedBoundsMin
		}
		if a.extendedBoundsMax != nil {
			bounds["max"] = a.extendedBoundsMax
		}
		opts["extended_bounds"] = bounds
	}

	// AggregationBuilder (SubAggregations)
	if len(a.subAggregations) > 0 {
		aggsMap := make(map[string]interface{})
		source["aggregations"] = aggsMap
		for name, aggregate := range a.subAggregations {
			aggsMap[name] = aggregate.Source()
		}
	}

	return source
}

+ 243 - 0
github.com/olivere/elastic/search_aggs_date_range.go

@@ -0,0 +1,243 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"time"
+)
+
// DateRangeAggregation is a range aggregation that is dedicated for
// date values. The main difference between this aggregation and the
// normal range aggregation is that the from and to values can be expressed
// in Date Math expressions, and it is also possible to specify a
// date format by which the from and to response fields will be returned.
// Note that this aggregration includes the from value and excludes the to
// value for each range.
// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-daterange-aggregation.html
type DateRangeAggregation struct {
	field           string
	script          string
	scriptFile      string
	lang            string
	params          map[string]interface{}
	subAggregations map[string]Aggregation
	keyed           *bool // nil = not set, use server default
	unmapped        *bool // nil = not set, use server default
	format          string
	entries         []DateRangeAggregationEntry // ranges, in insertion order
}

// DateRangeAggregationEntry is a single from/to range.
// From and To may be nil for an unbounded end.
type DateRangeAggregationEntry struct {
	Key  string
	From interface{}
	To   interface{}
}

// NewDateRangeAggregation creates a new DateRangeAggregation with
// empty params, sub-aggregations, and ranges.
func NewDateRangeAggregation() DateRangeAggregation {
	a := DateRangeAggregation{
		params:          make(map[string]interface{}),
		subAggregations: make(map[string]Aggregation),
		entries:         make([]DateRangeAggregationEntry, 0),
	}
	return a
}

// Field sets the date field to bucket on.
func (a DateRangeAggregation) Field(field string) DateRangeAggregation {
	a.field = field
	return a
}

// Script sets an inline script that generates the date values.
func (a DateRangeAggregation) Script(script string) DateRangeAggregation {
	a.script = script
	return a
}

// ScriptFile sets the name of a script file that generates the date values.
func (a DateRangeAggregation) ScriptFile(scriptFile string) DateRangeAggregation {
	a.scriptFile = scriptFile
	return a
}

// Lang sets the scripting language used for Script/ScriptFile.
func (a DateRangeAggregation) Lang(lang string) DateRangeAggregation {
	a.lang = lang
	return a
}

// Param adds a named parameter made available to the script.
func (a DateRangeAggregation) Param(name string, value interface{}) DateRangeAggregation {
	a.params[name] = value
	return a
}

// SubAggregation nests another aggregation under the given name.
func (a DateRangeAggregation) SubAggregation(name string, subAggregation Aggregation) DateRangeAggregation {
	a.subAggregations[name] = subAggregation
	return a
}

// Keyed toggles keying the response buckets by their range.
func (a DateRangeAggregation) Keyed(keyed bool) DateRangeAggregation {
	a.keyed = &keyed
	return a
}

// Unmapped sets the "unmapped" option of the aggregation.
func (a DateRangeAggregation) Unmapped(unmapped bool) DateRangeAggregation {
	a.unmapped = &unmapped
	return a
}

// Format sets the date format for the from/to response fields.
func (a DateRangeAggregation) Format(format string) DateRangeAggregation {
	a.format = format
	return a
}

// AddRange adds a bounded [from, to) range.
func (a DateRangeAggregation) AddRange(from, to interface{}) DateRangeAggregation {
	a.entries = append(a.entries, DateRangeAggregationEntry{From: from, To: to})
	return a
}

// AddRangeWithKey adds a bounded [from, to) range with a bucket key.
func (a DateRangeAggregation) AddRangeWithKey(key string, from, to interface{}) DateRangeAggregation {
	a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: from, To: to})
	return a
}

// AddUnboundedTo adds a range with no upper bound.
func (a DateRangeAggregation) AddUnboundedTo(from interface{}) DateRangeAggregation {
	a.entries = append(a.entries, DateRangeAggregationEntry{From: from, To: nil})
	return a
}

// AddUnboundedToWithKey adds a range with no upper bound and a bucket key.
func (a DateRangeAggregation) AddUnboundedToWithKey(key string, from interface{}) DateRangeAggregation {
	a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: from, To: nil})
	return a
}

// AddUnboundedFrom adds a range with no lower bound.
func (a DateRangeAggregation) AddUnboundedFrom(to interface{}) DateRangeAggregation {
	a.entries = append(a.entries, DateRangeAggregationEntry{From: nil, To: to})
	return a
}

// AddUnboundedFromWithKey adds a range with no lower bound and a bucket key.
func (a DateRangeAggregation) AddUnboundedFromWithKey(key string, to interface{}) DateRangeAggregation {
	a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: nil, To: to})
	return a
}

// Lt is a convenience alias for AddUnboundedFrom (values below to).
func (a DateRangeAggregation) Lt(to interface{}) DateRangeAggregation {
	a.entries = append(a.entries, DateRangeAggregationEntry{From: nil, To: to})
	return a
}

// LtWithKey is Lt with a bucket key.
func (a DateRangeAggregation) LtWithKey(key string, to interface{}) DateRangeAggregation {
	a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: nil, To: to})
	return a
}

// Between is a convenience alias for AddRange.
func (a DateRangeAggregation) Between(from, to interface{}) DateRangeAggregation {
	a.entries = append(a.entries, DateRangeAggregationEntry{From: from, To: to})
	return a
}

// BetweenWithKey is Between with a bucket key.
func (a DateRangeAggregation) BetweenWithKey(key string, from, to interface{}) DateRangeAggregation {
	a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: from, To: to})
	return a
}

// Gt is a convenience alias for AddUnboundedTo (values from and above).
func (a DateRangeAggregation) Gt(from interface{}) DateRangeAggregation {
	a.entries = append(a.entries, DateRangeAggregationEntry{From: from, To: nil})
	return a
}

// GtWithKey is Gt with a bucket key.
func (a DateRangeAggregation) GtWithKey(key string, from interface{}) DateRangeAggregation {
	a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: from, To: nil})
	return a
}

// Source returns the JSON-serializable body of the aggregation.
func (a DateRangeAggregation) Source() interface{} {
	// Example:
	// {
	//     "aggs" : {
	//         "range" : {
	//             "date_range": {
	//                 "field": "date",
	//                 "format": "MM-yyy",
	//                 "ranges": [
	//                     { "to": "now-10M/M" },
	//                     { "from": "now-10M/M" }
	//                 ]
	//             }
	//         }
	//     }
	// }
	//
	// This method returns only the { "date_range" : { ... } } part.

	source := make(map[string]interface{})
	opts := make(map[string]interface{})
	source["date_range"] = opts

	// ValuesSourceAggregationBuilder: emit only the settings that were set.
	if a.field != "" {
		opts["field"] = a.field
	}
	if a.script != "" {
		opts["script"] = a.script
	}
	if a.scriptFile != "" {
		opts["script_file"] = a.scriptFile
	}
	if a.lang != "" {
		opts["lang"] = a.lang
	}
	if len(a.params) > 0 {
		opts["params"] = a.params
	}

	if a.keyed != nil {
		opts["keyed"] = *a.keyed
	}
	if a.unmapped != nil {
		opts["unmapped"] = *a.unmapped
	}
	if a.format != "" {
		opts["format"] = a.format
	}

	// NOTE(review): From/To values whose type is not one of the cases
	// below (e.g. uint) are silently dropped from the range.
	ranges := make([]interface{}, 0)
	for _, ent := range a.entries {
		r := make(map[string]interface{})
		if ent.Key != "" {
			r["key"] = ent.Key
		}
		if ent.From != nil {
			switch from := ent.From.(type) {
			case int, int16, int32, int64, float32, float64:
				r["from"] = from
			case time.Time:
				// time.Time values are serialized as RFC 3339.
				r["from"] = from.Format(time.RFC3339)
			case string:
				r["from"] = from
			}
		}
		if ent.To != nil {
			switch to := ent.To.(type) {
			case int, int16, int32, int64, float32, float64:
				r["to"] = to
			case time.Time:
				r["to"] = to.Format(time.RFC3339)
			case string:
				r["to"] = to
			}
		}
		ranges = append(ranges, r)
	}
	opts["ranges"] = ranges

	// AggregationBuilder (SubAggregations)
	if len(a.subAggregations) > 0 {
		aggsMap := make(map[string]interface{})
		source["aggregations"] = aggsMap
		for name, aggregate := range a.subAggregations {
			aggsMap[name] = aggregate.Source()
		}
	}

	return source
}

+ 108 - 0
github.com/olivere/elastic/search_aggs_extended_stats.go

@@ -0,0 +1,108 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// ExtendedExtendedStatsAggregation is a multi-value metrics aggregation that
+// computes stats over numeric values extracted from the aggregated documents.
+// These values can be extracted either from specific numeric fields
+// in the documents, or be generated by a provided script.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-extendedstats-aggregation.html
+type ExtendedStatsAggregation struct {
+	field           string
+	script          string
+	scriptFile      string
+	lang            string
+	format          string
+	params          map[string]interface{}
+	subAggregations map[string]Aggregation
+}
+
+func NewExtendedStatsAggregation() ExtendedStatsAggregation {
+	a := ExtendedStatsAggregation{
+		params:          make(map[string]interface{}),
+		subAggregations: make(map[string]Aggregation),
+	}
+	return a
+}
+
+func (a ExtendedStatsAggregation) Field(field string) ExtendedStatsAggregation {
+	a.field = field
+	return a
+}
+
+func (a ExtendedStatsAggregation) Script(script string) ExtendedStatsAggregation {
+	a.script = script
+	return a
+}
+
+func (a ExtendedStatsAggregation) ScriptFile(scriptFile string) ExtendedStatsAggregation {
+	a.scriptFile = scriptFile
+	return a
+}
+
+func (a ExtendedStatsAggregation) Lang(lang string) ExtendedStatsAggregation {
+	a.lang = lang
+	return a
+}
+
+func (a ExtendedStatsAggregation) Format(format string) ExtendedStatsAggregation {
+	a.format = format
+	return a
+}
+
+func (a ExtendedStatsAggregation) Param(name string, value interface{}) ExtendedStatsAggregation {
+	a.params[name] = value
+	return a
+}
+
+func (a ExtendedStatsAggregation) SubAggregation(name string, subAggregation Aggregation) ExtendedStatsAggregation {
+	a.subAggregations[name] = subAggregation
+	return a
+}
+
+func (a ExtendedStatsAggregation) Source() interface{} {
+	// Example:
+	//	{
+	//    "aggs" : {
+	//      "grades_stats" : { "extended_stats" : { "field" : "grade" } }
+	//    }
+	//	}
+	// This method returns only the { "extended_stats" : { "field" : "grade" } } part.
+
+	source := make(map[string]interface{})
+	opts := make(map[string]interface{})
+	source["extended_stats"] = opts
+
+	// ValuesSourceAggregationBuilder
+	if a.field != "" {
+		opts["field"] = a.field
+	}
+	if a.script != "" {
+		opts["script"] = a.script
+	}
+	if a.scriptFile != "" {
+		opts["script_file"] = a.scriptFile
+	}
+	if a.lang != "" {
+		opts["lang"] = a.lang
+	}
+	if a.format != "" {
+		opts["format"] = a.format
+	}
+	if len(a.params) > 0 {
+		opts["params"] = a.params
+	}
+
+	// AggregationBuilder (SubAggregations)
+	if len(a.subAggregations) > 0 {
+		aggsMap := make(map[string]interface{})
+		source["aggregations"] = aggsMap
+		for name, aggregate := range a.subAggregations {
+			aggsMap[name] = aggregate.Source()
+		}
+	}
+
+	return source
+}

+ 58 - 0
github.com/olivere/elastic/search_aggs_filter.go

@@ -0,0 +1,58 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// FilterAggregation defines a single bucket of all the documents
+// in the current document set context that match a specified filter.
+// Often this will be used to narrow down the current aggregation context
+// to a specific set of documents.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-filter-aggregation.html
+type FilterAggregation struct {
+	filter          Filter
+	subAggregations map[string]Aggregation
+}
+
+func NewFilterAggregation() FilterAggregation {
+	a := FilterAggregation{
+		subAggregations: make(map[string]Aggregation),
+	}
+	return a
+}
+
+func (a FilterAggregation) SubAggregation(name string, subAggregation Aggregation) FilterAggregation {
+	a.subAggregations[name] = subAggregation
+	return a
+}
+
+func (a FilterAggregation) Filter(filter Filter) FilterAggregation {
+	a.filter = filter
+	return a
+}
+
+func (a FilterAggregation) Source() interface{} {
+	// Example:
+	//	{
+	//    "aggs" : {
+	//         "in_stock_products" : {
+	//             "filter" : { "range" : { "stock" : { "gt" : 0 } } }
+	//         }
+	//    }
+	//	}
+	// This method returns only the { "filter" : {} } part.
+
+	source := make(map[string]interface{})
+	source["filter"] = a.filter.Source()
+
+	// AggregationBuilder (SubAggregations)
+	if len(a.subAggregations) > 0 {
+		aggsMap := make(map[string]interface{})
+		source["aggregations"] = aggsMap
+		for name, aggregate := range a.subAggregations {
+			aggsMap[name] = aggregate.Source()
+		}
+	}
+
+	return source
+}

+ 76 - 0
github.com/olivere/elastic/search_aggs_filters.go

@@ -0,0 +1,76 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// FiltersAggregation defines a multi bucket aggregations where each bucket
+// is associated with a filter. Each bucket will collect all documents that
+// match its associated filter.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-filters-aggregation.html
+type FiltersAggregation struct {
+	filters         []Filter
+	subAggregations map[string]Aggregation
+}
+
+func NewFiltersAggregation() FiltersAggregation {
+	return FiltersAggregation{
+		filters:         make([]Filter, 0),
+		subAggregations: make(map[string]Aggregation),
+	}
+}
+
+func (a FiltersAggregation) Filter(filter Filter) FiltersAggregation {
+	a.filters = append(a.filters, filter)
+	return a
+}
+
+func (a FiltersAggregation) Filters(filters ...Filter) FiltersAggregation {
+	if len(filters) > 0 {
+		a.filters = append(a.filters, filters...)
+	}
+	return a
+}
+
+func (a FiltersAggregation) SubAggregation(name string, subAggregation Aggregation) FiltersAggregation {
+	a.subAggregations[name] = subAggregation
+	return a
+}
+
+func (a FiltersAggregation) Source() interface{} {
+	// Example:
+	//	{
+	//  "aggs" : {
+	//    "messages" : {
+	//      "filters" : {
+	//        "filters" : {
+	//          "errors" :   { "term" : { "body" : "error"   }},
+	//          "warnings" : { "term" : { "body" : "warning" }}
+	//        }
+	//      }
+	//    }
+	//  }
+	//	}
+	// This method returns only the (outer) { "filters" : {} } part.
+
+	source := make(map[string]interface{})
+	filters := make(map[string]interface{})
+	source["filters"] = filters
+
+	arr := make([]interface{}, len(a.filters))
+	for i, filter := range a.filters {
+		arr[i] = filter.Source()
+	}
+	filters["filters"] = arr
+
+	// AggregationBuilder (SubAggregations)
+	if len(a.subAggregations) > 0 {
+		aggsMap := make(map[string]interface{})
+		source["aggregations"] = aggsMap
+		for name, aggregate := range a.subAggregations {
+			aggsMap[name] = aggregate.Source()
+		}
+	}
+
+	return source
+}

+ 104 - 0
github.com/olivere/elastic/search_aggs_geo_bounds.go

@@ -0,0 +1,104 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
// GeoBoundsAggregation is a metric aggregation that computes the
// bounding box containing all geo_point values for a field.
// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-geobounds-aggregation.html
type GeoBoundsAggregation struct {
	field         string
	script        string
	scriptFile    string
	lang          string
	params        map[string]interface{} // lazily created by Param
	wrapLongitude *bool                  // nil = not set, use server default
}

// NewGeoBoundsAggregation creates a new, unconfigured GeoBoundsAggregation.
func NewGeoBoundsAggregation() GeoBoundsAggregation {
	a := GeoBoundsAggregation{}
	return a
}

// Field sets the geo_point field to compute the bounding box for.
func (a GeoBoundsAggregation) Field(field string) GeoBoundsAggregation {
	a.field = field
	return a
}

// Script sets an inline script that generates the points.
func (a GeoBoundsAggregation) Script(script string) GeoBoundsAggregation {
	a.script = script
	return a
}

// ScriptFile sets the name of a script file that generates the points.
func (a GeoBoundsAggregation) ScriptFile(scriptFile string) GeoBoundsAggregation {
	a.scriptFile = scriptFile
	return a
}

// Lang sets the scripting language used for Script/ScriptFile.
func (a GeoBoundsAggregation) Lang(lang string) GeoBoundsAggregation {
	a.lang = lang
	return a
}

// Params replaces the script parameters wholesale.
func (a GeoBoundsAggregation) Params(params map[string]interface{}) GeoBoundsAggregation {
	a.params = params
	return a
}

// Param sets a single script parameter, creating the map on first use.
func (a GeoBoundsAggregation) Param(name string, value interface{}) GeoBoundsAggregation {
	if a.params == nil {
		a.params = make(map[string]interface{})
	}
	a.params[name] = value
	return a
}

// WrapLongitude specifies whether the bounding box is allowed to
// overlap the international date line.
func (a GeoBoundsAggregation) WrapLongitude(wrapLongitude bool) GeoBoundsAggregation {
	a.wrapLongitude = &wrapLongitude
	return a
}

// Source returns the JSON-serializable body of the aggregation.
func (a GeoBoundsAggregation) Source() interface{} {
	// Example:
	// {
	//     "query" : {
	//         "match" : { "business_type" : "shop" }
	//     },
	//     "aggs" : {
	//         "viewport" : {
	//             "geo_bounds" : {
	//                 "field" : "location",
	//                 "wrap_longitude" : "true"
	//             }
	//         }
	//     }
	// }
	//
	// This method returns only the { "geo_bounds" : { ... } } part.

	source := make(map[string]interface{})
	opts := make(map[string]interface{})
	source["geo_bounds"] = opts

	// Emit only the settings that were set.
	if a.field != "" {
		opts["field"] = a.field
	}
	if a.script != "" {
		opts["script"] = a.script
	}
	if a.scriptFile != "" {
		opts["script_file"] = a.scriptFile
	}
	if a.lang != "" {
		opts["lang"] = a.lang
	}
	// len of a nil map is 0, so the former explicit nil check was
	// redundant (staticcheck S1009).
	if len(a.params) > 0 {
		opts["params"] = a.params
	}
	if a.wrapLongitude != nil {
		opts["wrap_longitude"] = *a.wrapLongitude
	}

	return source
}

+ 180 - 0
github.com/olivere/elastic/search_aggs_geo_distance.go

@@ -0,0 +1,180 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// GeoDistanceAggregation is a multi-bucket aggregation that works on geo_point fields
+// and conceptually works very similar to the range aggregation.
+// The user can define a point of origin and a set of distance range buckets.
+// The aggregation evaluate the distance of each document value from
+// the origin point and determines the buckets it belongs to based on
+// the ranges (a document belongs to a bucket if the distance between the
+// document and the origin falls within the distance range of the bucket).
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/search-aggregations-bucket-geodistance-aggregation.html
+type GeoDistanceAggregation struct {
+	field           string
+	unit            string
+	distanceType    string
+	point           string
+	ranges          []geoDistAggRange
+	subAggregations map[string]Aggregation
+}
+
+// geoDistAggRange describes one distance bucket: an optional key plus
+// from/to boundaries. A nil boundary means the bucket is unbounded on
+// that side.
+type geoDistAggRange struct {
+	Key  string
+	From interface{}
+	To   interface{}
+}
+
+// NewGeoDistanceAggregation initializes a GeoDistanceAggregation with
+// empty range and sub-aggregation containers.
+func NewGeoDistanceAggregation() GeoDistanceAggregation {
+	a := GeoDistanceAggregation{
+		subAggregations: make(map[string]Aggregation),
+		ranges:          make([]geoDistAggRange, 0),
+	}
+	return a
+}
+
+// Field sets the geo_point field to measure distances on.
+func (a GeoDistanceAggregation) Field(field string) GeoDistanceAggregation {
+	a.field = field
+	return a
+}
+
+// Unit sets the distance unit used for the ranges (e.g. "km", "mi").
+func (a GeoDistanceAggregation) Unit(unit string) GeoDistanceAggregation {
+	a.unit = unit
+	return a
+}
+
+// DistanceType sets how the distance is computed (e.g. "arc", "plane").
+func (a GeoDistanceAggregation) DistanceType(distanceType string) GeoDistanceAggregation {
+	a.distanceType = distanceType
+	return a
+}
+
+// Point sets the point of origin, serialized as "origin" (e.g. "52.3760, 4.894").
+func (a GeoDistanceAggregation) Point(latLon string) GeoDistanceAggregation {
+	a.point = latLon
+	return a
+}
+
+// SubAggregation adds a named sub-aggregation computed per bucket.
+func (a GeoDistanceAggregation) SubAggregation(name string, subAggregation Aggregation) GeoDistanceAggregation {
+	a.subAggregations[name] = subAggregation
+	return a
+}
+
+// AddRange appends a bucket covering [from, to); nil leaves a side unbounded.
+func (a GeoDistanceAggregation) AddRange(from, to interface{}) GeoDistanceAggregation {
+	a.ranges = append(a.ranges, geoDistAggRange{From: from, To: to})
+	return a
+}
+
+// AddRangeWithKey appends a bucket like AddRange, labeled with key.
+func (a GeoDistanceAggregation) AddRangeWithKey(key string, from, to interface{}) GeoDistanceAggregation {
+	a.ranges = append(a.ranges, geoDistAggRange{Key: key, From: from, To: to})
+	return a
+}
+
+// AddUnboundedTo appends a bucket from the given distance to infinity.
+func (a GeoDistanceAggregation) AddUnboundedTo(from float64) GeoDistanceAggregation {
+	a.ranges = append(a.ranges, geoDistAggRange{From: from, To: nil})
+	return a
+}
+
+// AddUnboundedToWithKey appends a bucket like AddUnboundedTo, labeled with key.
+func (a GeoDistanceAggregation) AddUnboundedToWithKey(key string, from float64) GeoDistanceAggregation {
+	a.ranges = append(a.ranges, geoDistAggRange{Key: key, From: from, To: nil})
+	return a
+}
+
+// AddUnboundedFrom appends a bucket from zero up to the given distance.
+func (a GeoDistanceAggregation) AddUnboundedFrom(to float64) GeoDistanceAggregation {
+	a.ranges = append(a.ranges, geoDistAggRange{From: nil, To: to})
+	return a
+}
+
+// AddUnboundedFromWithKey appends a bucket like AddUnboundedFrom, labeled with key.
+func (a GeoDistanceAggregation) AddUnboundedFromWithKey(key string, to float64) GeoDistanceAggregation {
+	a.ranges = append(a.ranges, geoDistAggRange{Key: key, From: nil, To: to})
+	return a
+}
+
+// Between is an alias for AddRange.
+func (a GeoDistanceAggregation) Between(from, to interface{}) GeoDistanceAggregation {
+	a.ranges = append(a.ranges, geoDistAggRange{From: from, To: to})
+	return a
+}
+
+// BetweenWithKey is an alias for AddRangeWithKey.
+func (a GeoDistanceAggregation) BetweenWithKey(key string, from, to interface{}) GeoDistanceAggregation {
+	a.ranges = append(a.ranges, geoDistAggRange{Key: key, From: from, To: to})
+	return a
+}
+
+// Source returns the JSON-serializable body of the aggregation.
+func (a GeoDistanceAggregation) Source() interface{} {
+	// Example:
+	// {
+	//    "aggs" : {
+	//        "rings_around_amsterdam" : {
+	//            "geo_distance" : {
+	//                "field" : "location",
+	//                "origin" : "52.3760, 4.894",
+	//                "ranges" : [
+	//                    { "to" : 100 },
+	//                    { "from" : 100, "to" : 300 },
+	//                    { "from" : 300 }
+	//                ]
+	//            }
+	//        }
+	//    }
+	// }
+	//
+	// This method returns only the { "geo_distance" : { ... } } part.
+
+	source := make(map[string]interface{})
+	opts := make(map[string]interface{})
+	source["geo_distance"] = opts
+
+	if a.field != "" {
+		opts["field"] = a.field
+	}
+	if a.unit != "" {
+		opts["unit"] = a.unit
+	}
+	if a.distanceType != "" {
+		opts["distance_type"] = a.distanceType
+	}
+	if a.point != "" {
+		opts["origin"] = a.point
+	}
+
+	// NOTE(review): From/To values of types other than the numeric,
+	// numeric-pointer, or string cases below are silently dropped.
+	ranges := make([]interface{}, 0)
+	for _, ent := range a.ranges {
+		r := make(map[string]interface{})
+		if ent.Key != "" {
+			r["key"] = ent.Key
+		}
+		if ent.From != nil {
+			switch from := ent.From.(type) {
+			case int, int16, int32, int64, float32, float64:
+				r["from"] = from
+			case *int, *int16, *int32, *int64, *float32, *float64:
+				r["from"] = from
+			case string:
+				r["from"] = from
+			}
+		}
+		if ent.To != nil {
+			switch to := ent.To.(type) {
+			case int, int16, int32, int64, float32, float64:
+				r["to"] = to
+			case *int, *int16, *int32, *int64, *float32, *float64:
+				r["to"] = to
+			case string:
+				r["to"] = to
+			}
+		}
+		ranges = append(ranges, r)
+	}
+	opts["ranges"] = ranges
+
+	// AggregationBuilder (SubAggregations)
+	if len(a.subAggregations) > 0 {
+		aggsMap := make(map[string]interface{})
+		source["aggregations"] = aggsMap
+		for name, aggregate := range a.subAggregations {
+			aggsMap[name] = aggregate.Source()
+		}
+	}
+
+	return source
+}

+ 56 - 0
github.com/olivere/elastic/search_aggs_global.go

@@ -0,0 +1,56 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// GlobalAggregation defines a single bucket of all the documents within
+// the search execution context. This context is defined by the indices
+// and the document types you’re searching on, but is not influenced
+// by the search query itself.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-global-aggregation.html
+type GlobalAggregation struct {
+	subAggregations map[string]Aggregation
+}
+
+// NewGlobalAggregation initializes a GlobalAggregation with an empty
+// sub-aggregation container.
+func NewGlobalAggregation() GlobalAggregation {
+	a := GlobalAggregation{
+		subAggregations: make(map[string]Aggregation),
+	}
+	return a
+}
+
+// SubAggregation adds a named sub-aggregation computed within the global bucket.
+func (a GlobalAggregation) SubAggregation(name string, subAggregation GlobalAggregation) GlobalAggregation {
+	a.subAggregations[name] = subAggregation
+	return a
+}
+
+// Source returns the JSON-serializable body of the aggregation.
+func (a GlobalAggregation) Source() interface{} {
+	// Example:
+	//	{
+	//    "aggs" : {
+	//         "all_products" : {
+	//             "global" : {},
+	//             "aggs" : {
+	//                 "avg_price" : { "avg" : { "field" : "price" } }
+	//             }
+	//         }
+	//    }
+	//	}
+	// This method returns only the { "global" : {} } part.
+
+	source := make(map[string]interface{})
+	opts := make(map[string]interface{})
+	source["global"] = opts
+
+	// AggregationBuilder (SubAggregations)
+	if len(a.subAggregations) > 0 {
+		aggsMap := make(map[string]interface{})
+		source["aggregations"] = aggsMap
+		for name, aggregate := range a.subAggregations {
+			aggsMap[name] = aggregate.Source()
+		}
+	}
+
+	return source
+}

+ 234 - 0
github.com/olivere/elastic/search_aggs_histogram.go

@@ -0,0 +1,234 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// HistogramAggregation is a multi-bucket values source based aggregation
+// that can be applied on numeric values extracted from the documents.
+// It dynamically builds fixed size (a.k.a. interval) buckets over the
+// values.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-histogram-aggregation.html
+type HistogramAggregation struct {
+	field           string
+	script          string
+	scriptFile      string
+	lang            string
+	params          map[string]interface{}
+	subAggregations map[string]Aggregation
+
+	interval          int64
+	order             string
+	orderAsc          bool
+	minDocCount       *int64
+	extendedBoundsMin *int64
+	extendedBoundsMax *int64
+}
+
+// NewHistogramAggregation initializes a HistogramAggregation with empty
+// params and sub-aggregation containers.
+func NewHistogramAggregation() HistogramAggregation {
+	a := HistogramAggregation{
+		params:          make(map[string]interface{}),
+		subAggregations: make(map[string]Aggregation),
+	}
+	return a
+}
+
+// Field sets the numeric field to build the histogram on.
+func (a HistogramAggregation) Field(field string) HistogramAggregation {
+	a.field = field
+	return a
+}
+
+// Script sets an inline script that generates the values to aggregate.
+func (a HistogramAggregation) Script(script string) HistogramAggregation {
+	a.script = script
+	return a
+}
+
+// ScriptFile sets the name of a script file to use instead of an inline script.
+func (a HistogramAggregation) ScriptFile(scriptFile string) HistogramAggregation {
+	a.scriptFile = scriptFile
+	return a
+}
+
+// Lang sets the scripting language used for the script.
+func (a HistogramAggregation) Lang(lang string) HistogramAggregation {
+	a.lang = lang
+	return a
+}
+
+// Param sets a single script parameter.
+func (a HistogramAggregation) Param(name string, value interface{}) HistogramAggregation {
+	a.params[name] = value
+	return a
+}
+
+// SubAggregation adds a named sub-aggregation computed per bucket.
+func (a HistogramAggregation) SubAggregation(name string, subAggregation Aggregation) HistogramAggregation {
+	a.subAggregations[name] = subAggregation
+	return a
+}
+
+// Interval sets the fixed bucket width. Note that it is always
+// serialized, even when left at its zero value.
+func (a HistogramAggregation) Interval(interval int64) HistogramAggregation {
+	a.interval = interval
+	return a
+}
+
+// Order specifies the sort order. Valid values for order are:
+// "_key", "_count", a sub-aggregation name, or a sub-aggregation name
+// with a metric.
+func (a HistogramAggregation) Order(order string, asc bool) HistogramAggregation {
+	a.order = order
+	a.orderAsc = asc
+	return a
+}
+
+// OrderByCount orders buckets by their document count.
+func (a HistogramAggregation) OrderByCount(asc bool) HistogramAggregation {
+	// "order" : { "_count" : "asc" }
+	a.order = "_count"
+	a.orderAsc = asc
+	return a
+}
+
+// OrderByCountAsc orders buckets by ascending document count.
+func (a HistogramAggregation) OrderByCountAsc() HistogramAggregation {
+	return a.OrderByCount(true)
+}
+
+// OrderByCountDesc orders buckets by descending document count.
+func (a HistogramAggregation) OrderByCountDesc() HistogramAggregation {
+	return a.OrderByCount(false)
+}
+
+// OrderByKey orders buckets by their key.
+func (a HistogramAggregation) OrderByKey(asc bool) HistogramAggregation {
+	// "order" : { "_key" : "asc" }
+	a.order = "_key"
+	a.orderAsc = asc
+	return a
+}
+
+// OrderByKeyAsc orders buckets by ascending key.
+func (a HistogramAggregation) OrderByKeyAsc() HistogramAggregation {
+	return a.OrderByKey(true)
+}
+
+// OrderByKeyDesc orders buckets by descending key.
+func (a HistogramAggregation) OrderByKeyDesc() HistogramAggregation {
+	return a.OrderByKey(false)
+}
+
+// OrderByAggregation creates a bucket ordering strategy which sorts buckets
+// based on a single-valued calc get.
+func (a HistogramAggregation) OrderByAggregation(aggName string, asc bool) HistogramAggregation {
+	// {
+	//     "aggs" : {
+	//         "genders" : {
+	//             "terms" : {
+	//                 "field" : "gender",
+	//                 "order" : { "avg_height" : "desc" }
+	//             },
+	//             "aggs" : {
+	//                 "avg_height" : { "avg" : { "field" : "height" } }
+	//             }
+	//         }
+	//     }
+	// }
+	a.order = aggName
+	a.orderAsc = asc
+	return a
+}
+
+// OrderByAggregationAndMetric creates a bucket ordering strategy which
+// sorts buckets based on a multi-valued calc get.
+func (a HistogramAggregation) OrderByAggregationAndMetric(aggName, metric string, asc bool) HistogramAggregation {
+	// {
+	//     "aggs" : {
+	//         "genders" : {
+	//             "terms" : {
+	//                 "field" : "gender",
+	//                 "order" : { "height_stats.avg" : "desc" }
+	//             },
+	//             "aggs" : {
+	//                 "height_stats" : { "stats" : { "field" : "height" } }
+	//             }
+	//         }
+	//     }
+	// }
+	a.order = aggName + "." + metric
+	a.orderAsc = asc
+	return a
+}
+
+// MinDocCount sets the minimum number of documents a bucket must contain
+// to be returned.
+func (a HistogramAggregation) MinDocCount(minDocCount int64) HistogramAggregation {
+	a.minDocCount = &minDocCount
+	return a
+}
+
+// ExtendedBoundsMin forces the histogram to start at the given value.
+func (a HistogramAggregation) ExtendedBoundsMin(min int64) HistogramAggregation {
+	a.extendedBoundsMin = &min
+	return a
+}
+
+// ExtendedBoundsMax forces the histogram to extend to the given value.
+func (a HistogramAggregation) ExtendedBoundsMax(max int64) HistogramAggregation {
+	a.extendedBoundsMax = &max
+	return a
+}
+
+// Source returns the JSON-serializable body of the aggregation.
+func (a HistogramAggregation) Source() interface{} {
+	// Example:
+	// {
+	//     "aggs" : {
+	//         "prices" : {
+	//             "histogram" : {
+	//                 "field" : "price",
+	//                 "interval" : 50
+	//             }
+	//         }
+	//     }
+	// }
+	//
+	// This method returns only the { "histogram" : { ... } } part.
+
+	source := make(map[string]interface{})
+	opts := make(map[string]interface{})
+	source["histogram"] = opts
+
+	// ValuesSourceAggregationBuilder
+	if a.field != "" {
+		opts["field"] = a.field
+	}
+	if a.script != "" {
+		opts["script"] = a.script
+	}
+	// Fix: script_file was set via ScriptFile but never serialized,
+	// unlike in the other aggregation builders of this package.
+	if a.scriptFile != "" {
+		opts["script_file"] = a.scriptFile
+	}
+	if a.lang != "" {
+		opts["lang"] = a.lang
+	}
+	if len(a.params) > 0 {
+		opts["params"] = a.params
+	}
+
+	opts["interval"] = a.interval
+	if a.order != "" {
+		o := make(map[string]interface{})
+		if a.orderAsc {
+			o[a.order] = "asc"
+		} else {
+			o[a.order] = "desc"
+		}
+		opts["order"] = o
+	}
+	if a.minDocCount != nil {
+		opts["min_doc_count"] = *a.minDocCount
+	}
+	if a.extendedBoundsMin != nil || a.extendedBoundsMax != nil {
+		bounds := make(map[string]interface{})
+		if a.extendedBoundsMin != nil {
+			bounds["min"] = a.extendedBoundsMin
+		}
+		if a.extendedBoundsMax != nil {
+			bounds["max"] = a.extendedBoundsMax
+		}
+		opts["extended_bounds"] = bounds
+	}
+
+	// AggregationBuilder (SubAggregations)
+	if len(a.subAggregations) > 0 {
+		aggsMap := make(map[string]interface{})
+		source["aggregations"] = aggsMap
+		for name, aggregate := range a.subAggregations {
+			aggsMap[name] = aggregate.Source()
+		}
+	}
+
+	return source
+}

+ 109 - 0
github.com/olivere/elastic/search_aggs_max.go

@@ -0,0 +1,109 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// MaxAggregation is a single-value metrics aggregation that keeps track and
+// returns the maximum value among the numeric values extracted from
+// the aggregated documents. These values can be extracted either from
+// specific numeric fields in the documents, or be generated by
+// a provided script.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-max-aggregation.html
+type MaxAggregation struct {
+	field           string
+	script          string
+	scriptFile      string
+	lang            string
+	format          string
+	params          map[string]interface{}
+	subAggregations map[string]Aggregation
+}
+
+// NewMaxAggregation initializes a MaxAggregation with empty params and
+// sub-aggregation containers.
+func NewMaxAggregation() MaxAggregation {
+	a := MaxAggregation{
+		params:          make(map[string]interface{}),
+		subAggregations: make(map[string]Aggregation),
+	}
+	return a
+}
+
+// Field sets the numeric field to take the maximum of.
+func (a MaxAggregation) Field(field string) MaxAggregation {
+	a.field = field
+	return a
+}
+
+// Script sets an inline script that generates the values to aggregate.
+func (a MaxAggregation) Script(script string) MaxAggregation {
+	a.script = script
+	return a
+}
+
+// ScriptFile sets the name of a script file to use instead of an inline script.
+func (a MaxAggregation) ScriptFile(scriptFile string) MaxAggregation {
+	a.scriptFile = scriptFile
+	return a
+}
+
+// Lang sets the scripting language used for the script.
+func (a MaxAggregation) Lang(lang string) MaxAggregation {
+	a.lang = lang
+	return a
+}
+
+// Format sets the format of the response value.
+func (a MaxAggregation) Format(format string) MaxAggregation {
+	a.format = format
+	return a
+}
+
+// Param sets a single script parameter.
+func (a MaxAggregation) Param(name string, value interface{}) MaxAggregation {
+	a.params[name] = value
+	return a
+}
+
+// SubAggregation adds a named sub-aggregation.
+func (a MaxAggregation) SubAggregation(name string, subAggregation Aggregation) MaxAggregation {
+	a.subAggregations[name] = subAggregation
+	return a
+}
+
+// Source returns the JSON-serializable body of the aggregation.
+func (a MaxAggregation) Source() interface{} {
+	// Example:
+	//	{
+	//    "aggs" : {
+	//      "max_price" : { "max" : { "field" : "price" } }
+	//    }
+	//	}
+	// This method returns only the { "max" : { "field" : "price" } } part.
+
+	source := make(map[string]interface{})
+	opts := make(map[string]interface{})
+	source["max"] = opts
+
+	// ValuesSourceAggregationBuilder
+	if a.field != "" {
+		opts["field"] = a.field
+	}
+	if a.script != "" {
+		opts["script"] = a.script
+	}
+	if a.scriptFile != "" {
+		opts["script_file"] = a.scriptFile
+	}
+	if a.lang != "" {
+		opts["lang"] = a.lang
+	}
+	if a.format != "" {
+		opts["format"] = a.format
+	}
+	if len(a.params) > 0 {
+		opts["params"] = a.params
+	}
+
+	// AggregationBuilder (SubAggregations)
+	if len(a.subAggregations) > 0 {
+		aggsMap := make(map[string]interface{})
+		source["aggregations"] = aggsMap
+		for name, aggregate := range a.subAggregations {
+			aggsMap[name] = aggregate.Source()
+		}
+	}
+
+	return source
+}

+ 109 - 0
github.com/olivere/elastic/search_aggs_min.go

@@ -0,0 +1,109 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// MinAggregation is a single-value metrics aggregation that keeps track and
+// returns the minimum value among numeric values extracted from the
+// aggregated documents. These values can be extracted either from
+// specific numeric fields in the documents, or be generated by a
+// provided script.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-min-aggregation.html
+type MinAggregation struct {
+	field           string
+	script          string
+	scriptFile      string
+	lang            string
+	format          string
+	params          map[string]interface{}
+	subAggregations map[string]Aggregation
+}
+
+// NewMinAggregation initializes a MinAggregation with empty params and
+// sub-aggregation containers.
+func NewMinAggregation() MinAggregation {
+	a := MinAggregation{
+		params:          make(map[string]interface{}),
+		subAggregations: make(map[string]Aggregation),
+	}
+	return a
+}
+
+// Field sets the numeric field to take the minimum of.
+func (a MinAggregation) Field(field string) MinAggregation {
+	a.field = field
+	return a
+}
+
+// Script sets an inline script that generates the values to aggregate.
+func (a MinAggregation) Script(script string) MinAggregation {
+	a.script = script
+	return a
+}
+
+// ScriptFile sets the name of a script file to use instead of an inline script.
+func (a MinAggregation) ScriptFile(scriptFile string) MinAggregation {
+	a.scriptFile = scriptFile
+	return a
+}
+
+// Lang sets the scripting language used for the script.
+func (a MinAggregation) Lang(lang string) MinAggregation {
+	a.lang = lang
+	return a
+}
+
+// Format sets the format of the response value.
+func (a MinAggregation) Format(format string) MinAggregation {
+	a.format = format
+	return a
+}
+
+// Param sets a single script parameter.
+func (a MinAggregation) Param(name string, value interface{}) MinAggregation {
+	a.params[name] = value
+	return a
+}
+
+// SubAggregation adds a named sub-aggregation.
+func (a MinAggregation) SubAggregation(name string, subAggregation Aggregation) MinAggregation {
+	a.subAggregations[name] = subAggregation
+	return a
+}
+
+// Source returns the JSON-serializable body of the aggregation.
+func (a MinAggregation) Source() interface{} {
+	// Example:
+	//	{
+	//    "aggs" : {
+	//      "min_price" : { "min" : { "field" : "price" } }
+	//    }
+	//	}
+	// This method returns only the { "min" : { "field" : "price" } } part.
+
+	source := make(map[string]interface{})
+	opts := make(map[string]interface{})
+	source["min"] = opts
+
+	// ValuesSourceAggregationBuilder
+	if a.field != "" {
+		opts["field"] = a.field
+	}
+	if a.script != "" {
+		opts["script"] = a.script
+	}
+	if a.scriptFile != "" {
+		opts["script_file"] = a.scriptFile
+	}
+	if a.lang != "" {
+		opts["lang"] = a.lang
+	}
+	if a.format != "" {
+		opts["format"] = a.format
+	}
+	if len(a.params) > 0 {
+		opts["params"] = a.params
+	}
+
+	// AggregationBuilder (SubAggregations)
+	if len(a.subAggregations) > 0 {
+		aggsMap := make(map[string]interface{})
+		source["aggregations"] = aggsMap
+		for name, aggregate := range a.subAggregations {
+			aggsMap[name] = aggregate.Source()
+		}
+	}
+
+	return source
+}

+ 66 - 0
github.com/olivere/elastic/search_aggs_missing.go

@@ -0,0 +1,66 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// MissingAggregation is a field data based single bucket aggregation,
+// that creates a bucket of all documents in the current document set context
+// that are missing a field value (effectively, missing a field or having
+// the configured NULL value set). This aggregator will often be used in
+// conjunction with other field data bucket aggregators (such as ranges)
+// to return information for all the documents that could not be placed
+// in any of the other buckets due to missing field data values.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-missing-aggregation.html
+type MissingAggregation struct {
+	field           string
+	subAggregations map[string]Aggregation
+}
+
+// NewMissingAggregation initializes a MissingAggregation with an empty
+// sub-aggregation container.
+func NewMissingAggregation() MissingAggregation {
+	a := MissingAggregation{
+		subAggregations: make(map[string]Aggregation),
+	}
+	return a
+}
+
+// Field sets the field whose absence places documents into the bucket.
+func (a MissingAggregation) Field(field string) MissingAggregation {
+	a.field = field
+	return a
+}
+
+// SubAggregation adds a named sub-aggregation computed within the bucket.
+func (a MissingAggregation) SubAggregation(name string, subAggregation Aggregation) MissingAggregation {
+	a.subAggregations[name] = subAggregation
+	return a
+}
+
+// Source returns the JSON-serializable body of the aggregation.
+func (a MissingAggregation) Source() interface{} {
+	// Example:
+	//	{
+	//    "aggs" : {
+	//      "products_without_a_price" : {
+	//        "missing" : { "field" : "price" }
+	//      }
+	//    }
+	//	}
+	// This method returns only the { "missing" : { ... } } part.
+
+	source := make(map[string]interface{})
+	opts := make(map[string]interface{})
+	source["missing"] = opts
+
+	if a.field != "" {
+		opts["field"] = a.field
+	}
+
+	// AggregationBuilder (SubAggregations)
+	if len(a.subAggregations) > 0 {
+		aggsMap := make(map[string]interface{})
+		source["aggregations"] = aggsMap
+		for name, aggregate := range a.subAggregations {
+			aggsMap[name] = aggregate.Source()
+		}
+	}
+
+	return source
+}

+ 67 - 0
github.com/olivere/elastic/search_aggs_nested.go

@@ -0,0 +1,67 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// NestedAggregation is a special single bucket aggregation that enables
+// aggregating nested documents.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/search-aggregations-bucket-nested-aggregation.html
+type NestedAggregation struct {
+	path            string
+	subAggregations map[string]Aggregation
+}
+
+// NewNestedAggregation initializes a NestedAggregation with an empty
+// sub-aggregation container.
+func NewNestedAggregation() NestedAggregation {
+	a := NestedAggregation{
+		subAggregations: make(map[string]Aggregation),
+	}
+	return a
+}
+
+// SubAggregation adds a named sub-aggregation computed on the nested documents.
+func (a NestedAggregation) SubAggregation(name string, subAggregation Aggregation) NestedAggregation {
+	a.subAggregations[name] = subAggregation
+	return a
+}
+
+// Path sets the path of the nested documents, e.g. "resellers".
+// It is always serialized, even when left empty.
+func (a NestedAggregation) Path(path string) NestedAggregation {
+	a.path = path
+	return a
+}
+
+// Source returns the JSON-serializable body of the aggregation.
+func (a NestedAggregation) Source() interface{} {
+	// Example:
+	//	{
+	//     "query" : {
+	//         "match" : { "name" : "led tv" }
+	//     },
+	//     "aggs" : {
+	//         "resellers" : {
+	//             "nested" : {
+	//                 "path" : "resellers"
+	//             },
+	//             "aggs" : {
+	//                 "min_price" : { "min" : { "field" : "resellers.price" } }
+	//             }
+	//         }
+	//     }
+	//	}
+	// This method returns only the { "nested" : {} } part.
+
+	source := make(map[string]interface{})
+	opts := make(map[string]interface{})
+	source["nested"] = opts
+
+	opts["path"] = a.path
+
+	// AggregationBuilder (SubAggregations)
+	if len(a.subAggregations) > 0 {
+		aggsMap := make(map[string]interface{})
+		source["aggregations"] = aggsMap
+		for name, aggregate := range a.subAggregations {
+			aggsMap[name] = aggregate.Source()
+		}
+	}
+
+	return source
+}

+ 141 - 0
github.com/olivere/elastic/search_aggs_percentile_ranks.go

@@ -0,0 +1,141 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// PercentileRanksAggregation is a multi-value metrics aggregation that
+// computes, for each given value, the percentage of aggregated documents
+// whose extracted numeric value is at or below it.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-percentile-rank-aggregation.html
+type PercentileRanksAggregation struct {
+	field           string
+	script          string
+	scriptFile      string
+	lang            string
+	format          string
+	params          map[string]interface{}
+	subAggregations map[string]Aggregation
+	values          []float64
+	compression     *float64
+	estimator       string
+}
+
+// NewPercentileRanksAggregation initializes a PercentileRanksAggregation
+// with empty params, sub-aggregation, and values containers.
+func NewPercentileRanksAggregation() PercentileRanksAggregation {
+	a := PercentileRanksAggregation{
+		params:          make(map[string]interface{}),
+		subAggregations: make(map[string]Aggregation),
+		values:          make([]float64, 0),
+	}
+	return a
+}
+
+// Field sets the numeric field to rank values against.
+func (a PercentileRanksAggregation) Field(field string) PercentileRanksAggregation {
+	a.field = field
+	return a
+}
+
+// Script sets an inline script that generates the values to aggregate.
+func (a PercentileRanksAggregation) Script(script string) PercentileRanksAggregation {
+	a.script = script
+	return a
+}
+
+// ScriptFile sets the name of a script file to use instead of an inline script.
+func (a PercentileRanksAggregation) ScriptFile(scriptFile string) PercentileRanksAggregation {
+	a.scriptFile = scriptFile
+	return a
+}
+
+// Lang sets the scripting language used for the script.
+func (a PercentileRanksAggregation) Lang(lang string) PercentileRanksAggregation {
+	a.lang = lang
+	return a
+}
+
+// Format sets the format of the response values.
+func (a PercentileRanksAggregation) Format(format string) PercentileRanksAggregation {
+	a.format = format
+	return a
+}
+
+// Param sets a single script parameter.
+func (a PercentileRanksAggregation) Param(name string, value interface{}) PercentileRanksAggregation {
+	a.params[name] = value
+	return a
+}
+
+// SubAggregation adds a named sub-aggregation.
+func (a PercentileRanksAggregation) SubAggregation(name string, subAggregation Aggregation) PercentileRanksAggregation {
+	a.subAggregations[name] = subAggregation
+	return a
+}
+
+// Values replaces the set of values whose percentile ranks are computed.
+func (a PercentileRanksAggregation) Values(values ...float64) PercentileRanksAggregation {
+	a.values = make([]float64, 0)
+	a.values = append(a.values, values...)
+	return a
+}
+
+// Compression controls the accuracy/memory trade-off of the estimator.
+func (a PercentileRanksAggregation) Compression(compression float64) PercentileRanksAggregation {
+	a.compression = &compression
+	return a
+}
+
+// Estimator sets the estimation algorithm to use.
+func (a PercentileRanksAggregation) Estimator(estimator string) PercentileRanksAggregation {
+	a.estimator = estimator
+	return a
+}
+
+// Source returns the JSON-serializable body of the aggregation.
+func (a PercentileRanksAggregation) Source() interface{} {
+	// Example:
+	//	{
+	//    "aggs" : {
+	//      "load_time_outlier" : {
+	//         "percentile_ranks" : {
+	//           "field" : "load_time",
+	//           "values" : [15, 30]
+	//         }
+	//       }
+	//    }
+	//	}
+	// This method returns only the
+	//   { "percentile_ranks" : { "field" : "load_time", "values" : [15, 30] } }
+	// part.
+
+	source := make(map[string]interface{})
+	opts := make(map[string]interface{})
+	source["percentile_ranks"] = opts
+
+	// ValuesSourceAggregationBuilder
+	if a.field != "" {
+		opts["field"] = a.field
+	}
+	if a.script != "" {
+		opts["script"] = a.script
+	}
+	if a.scriptFile != "" {
+		opts["script_file"] = a.scriptFile
+	}
+	if a.lang != "" {
+		opts["lang"] = a.lang
+	}
+	if a.format != "" {
+		opts["format"] = a.format
+	}
+	if len(a.params) > 0 {
+		opts["params"] = a.params
+	}
+	if len(a.values) > 0 {
+		opts["values"] = a.values
+	}
+	if a.compression != nil {
+		opts["compression"] = *a.compression
+	}
+	if a.estimator != "" {
+		opts["estimator"] = a.estimator
+	}
+
+	// AggregationBuilder (SubAggregations)
+	if len(a.subAggregations) > 0 {
+		aggsMap := make(map[string]interface{})
+		source["aggregations"] = aggsMap
+		for name, aggregate := range a.subAggregations {
+			aggsMap[name] = aggregate.Source()
+		}
+	}
+
+	return source
+}

+ 140 - 0
github.com/olivere/elastic/search_aggs_percentiles.go

@@ -0,0 +1,140 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// PercentilesAggregation is a multi-value metrics aggregation that
+// calculates percentiles over numeric values extracted from the
+// aggregated documents.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-percentile-aggregation.html
+type PercentilesAggregation struct {
+	field           string
+	script          string
+	scriptFile      string
+	lang            string
+	format          string
+	params          map[string]interface{}
+	subAggregations map[string]Aggregation
+	percentiles     []float64
+	compression     *float64
+	estimator       string
+}
+
+// NewPercentilesAggregation initializes a PercentilesAggregation with
+// empty params, sub-aggregation, and percentiles containers.
+func NewPercentilesAggregation() PercentilesAggregation {
+	a := PercentilesAggregation{
+		params:          make(map[string]interface{}),
+		subAggregations: make(map[string]Aggregation),
+		percentiles:     make([]float64, 0),
+	}
+	return a
+}
+
+// Field sets the numeric field to compute percentiles on.
+func (a PercentilesAggregation) Field(field string) PercentilesAggregation {
+	a.field = field
+	return a
+}
+
+// Script sets an inline script that generates the values to aggregate.
+func (a PercentilesAggregation) Script(script string) PercentilesAggregation {
+	a.script = script
+	return a
+}
+
+// ScriptFile sets the name of a script file to use instead of an inline script.
+func (a PercentilesAggregation) ScriptFile(scriptFile string) PercentilesAggregation {
+	a.scriptFile = scriptFile
+	return a
+}
+
+// Lang sets the scripting language used for the script.
+func (a PercentilesAggregation) Lang(lang string) PercentilesAggregation {
+	a.lang = lang
+	return a
+}
+
+// Format sets the format of the response values.
+func (a PercentilesAggregation) Format(format string) PercentilesAggregation {
+	a.format = format
+	return a
+}
+
+// Param sets a single script parameter.
+func (a PercentilesAggregation) Param(name string, value interface{}) PercentilesAggregation {
+	a.params[name] = value
+	return a
+}
+
+// SubAggregation adds a named sub-aggregation.
+func (a PercentilesAggregation) SubAggregation(name string, subAggregation Aggregation) PercentilesAggregation {
+	a.subAggregations[name] = subAggregation
+	return a
+}
+
+// Percentiles replaces the set of percentiles to compute; serialized
+// under the "percents" key.
+func (a PercentilesAggregation) Percentiles(percentiles ...float64) PercentilesAggregation {
+	a.percentiles = make([]float64, 0)
+	a.percentiles = append(a.percentiles, percentiles...)
+	return a
+}
+
+// Compression controls the accuracy/memory trade-off of the estimator.
+func (a PercentilesAggregation) Compression(compression float64) PercentilesAggregation {
+	a.compression = &compression
+	return a
+}
+
+// Estimator sets the estimation algorithm to use.
+func (a PercentilesAggregation) Estimator(estimator string) PercentilesAggregation {
+	a.estimator = estimator
+	return a
+}
+
+// Source returns the JSON-serializable body of the aggregation.
+func (a PercentilesAggregation) Source() interface{} {
+	// Example:
+	//	{
+	//    "aggs" : {
+	//      "load_time_outlier" : {
+	//           "percentiles" : {
+	//               "field" : "load_time"
+	//           }
+	//       }
+	//    }
+	//	}
+	// This method returns only the
+	//   { "percentiles" : { "field" : "load_time" } }
+	// part.
+
+	source := make(map[string]interface{})
+	opts := make(map[string]interface{})
+	source["percentiles"] = opts
+
+	// ValuesSourceAggregationBuilder
+	if a.field != "" {
+		opts["field"] = a.field
+	}
+	if a.script != "" {
+		opts["script"] = a.script
+	}
+	if a.scriptFile != "" {
+		opts["script_file"] = a.scriptFile
+	}
+	if a.lang != "" {
+		opts["lang"] = a.lang
+	}
+	if a.format != "" {
+		opts["format"] = a.format
+	}
+	if len(a.params) > 0 {
+		opts["params"] = a.params
+	}
+	if len(a.percentiles) > 0 {
+		opts["percents"] = a.percentiles
+	}
+	if a.compression != nil {
+		opts["compression"] = *a.compression
+	}
+	if a.estimator != "" {
+		opts["estimator"] = a.estimator
+	}
+
+	// AggregationBuilder (SubAggregations)
+	if len(a.subAggregations) > 0 {
+		aggsMap := make(map[string]interface{})
+		source["aggregations"] = aggsMap
+		for name, aggregate := range a.subAggregations {
+			aggsMap[name] = aggregate.Source()
+		}
+	}
+
+	return source
+}

+ 232 - 0
github.com/olivere/elastic/search_aggs_range.go

@@ -0,0 +1,232 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"time"
+)
+
+// RangeAggregation is a multi-bucket value source based aggregation that
+// enables the user to define a set of ranges - each representing a bucket.
+// During the aggregation process, the values extracted from each document
+// will be checked against each bucket range and "bucket" the
+// relevant/matching document. Note that this aggregration includes the
+// from value and excludes the to value for each range.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-range-aggregation.html
+type RangeAggregation struct {
+	field           string
+	script          string
+	scriptFile      string
+	lang            string
+	params          map[string]interface{}
+	subAggregations map[string]Aggregation
+	keyed           *bool
+	unmapped        *bool
+	entries         []rangeAggregationEntry
+}
+
+type rangeAggregationEntry struct {
+	Key  string
+	From interface{}
+	To   interface{}
+}
+
+func NewRangeAggregation() RangeAggregation {
+	a := RangeAggregation{
+		params:          make(map[string]interface{}),
+		subAggregations: make(map[string]Aggregation),
+		entries:         make([]rangeAggregationEntry, 0),
+	}
+	return a
+}
+
+func (a RangeAggregation) Field(field string) RangeAggregation {
+	a.field = field
+	return a
+}
+
+func (a RangeAggregation) Script(script string) RangeAggregation {
+	a.script = script
+	return a
+}
+
+func (a RangeAggregation) ScriptFile(scriptFile string) RangeAggregation {
+	a.scriptFile = scriptFile
+	return a
+}
+
+func (a RangeAggregation) Lang(lang string) RangeAggregation {
+	a.lang = lang
+	return a
+}
+
+func (a RangeAggregation) Param(name string, value interface{}) RangeAggregation {
+	a.params[name] = value
+	return a
+}
+
+func (a RangeAggregation) SubAggregation(name string, subAggregation Aggregation) RangeAggregation {
+	a.subAggregations[name] = subAggregation
+	return a
+}
+
+func (a RangeAggregation) Keyed(keyed bool) RangeAggregation {
+	a.keyed = &keyed
+	return a
+}
+
+func (a RangeAggregation) Unmapped(unmapped bool) RangeAggregation {
+	a.unmapped = &unmapped
+	return a
+}
+
+func (a RangeAggregation) AddRange(from, to interface{}) RangeAggregation {
+	a.entries = append(a.entries, rangeAggregationEntry{From: from, To: to})
+	return a
+}
+
+func (a RangeAggregation) AddRangeWithKey(key string, from, to interface{}) RangeAggregation {
+	a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: from, To: to})
+	return a
+}
+
+func (a RangeAggregation) AddUnboundedTo(from interface{}) RangeAggregation {
+	a.entries = append(a.entries, rangeAggregationEntry{From: from, To: nil})
+	return a
+}
+
+func (a RangeAggregation) AddUnboundedToWithKey(key string, from interface{}) RangeAggregation {
+	a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: from, To: nil})
+	return a
+}
+
+func (a RangeAggregation) AddUnboundedFrom(to interface{}) RangeAggregation {
+	a.entries = append(a.entries, rangeAggregationEntry{From: nil, To: to})
+	return a
+}
+
+func (a RangeAggregation) AddUnboundedFromWithKey(key string, to interface{}) RangeAggregation {
+	a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: nil, To: to})
+	return a
+}
+
+func (a RangeAggregation) Lt(to interface{}) RangeAggregation {
+	a.entries = append(a.entries, rangeAggregationEntry{From: nil, To: to})
+	return a
+}
+
+func (a RangeAggregation) LtWithKey(key string, to interface{}) RangeAggregation {
+	a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: nil, To: to})
+	return a
+}
+
+func (a RangeAggregation) Between(from, to interface{}) RangeAggregation {
+	a.entries = append(a.entries, rangeAggregationEntry{From: from, To: to})
+	return a
+}
+
+func (a RangeAggregation) BetweenWithKey(key string, from, to interface{}) RangeAggregation {
+	a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: from, To: to})
+	return a
+}
+
+func (a RangeAggregation) Gt(from interface{}) RangeAggregation {
+	a.entries = append(a.entries, rangeAggregationEntry{From: from, To: nil})
+	return a
+}
+
+func (a RangeAggregation) GtWithKey(key string, from interface{}) RangeAggregation {
+	a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: from, To: nil})
+	return a
+}
+
+func (a RangeAggregation) Source() interface{} {
+	// Example:
+	// {
+	//     "aggs" : {
+	//         "price_ranges" : {
+	//             "range" : {
+	//                 "field" : "price",
+	//                 "ranges" : [
+	//                     { "to" : 50 },
+	//                     { "from" : 50, "to" : 100 },
+	//                     { "from" : 100 }
+	//                 ]
+	//             }
+	//         }
+	//     }
+	// }
+	//
+	// This method returns only the { "range" : { ... } } part.
+
+	source := make(map[string]interface{})
+	opts := make(map[string]interface{})
+	source["range"] = opts
+
+	// ValuesSourceAggregationBuilder
+	if a.field != "" {
+		opts["field"] = a.field
+	}
+	if a.script != "" {
+		opts["script"] = a.script
+	}
+	if a.scriptFile != "" {
+		opts["script_file"] = a.scriptFile
+	}
+	if a.lang != "" {
+		opts["lang"] = a.lang
+	}
+	if len(a.params) > 0 {
+		opts["params"] = a.params
+	}
+
+	if a.keyed != nil {
+		opts["keyed"] = *a.keyed
+	}
+	if a.unmapped != nil {
+		opts["unmapped"] = *a.unmapped
+	}
+
+	ranges := make([]interface{}, 0)
+	for _, ent := range a.entries {
+		r := make(map[string]interface{})
+		if ent.Key != "" {
+			r["key"] = ent.Key
+		}
+		if ent.From != nil {
+			switch from := ent.From.(type) {
+			case int, int16, int32, int64, float32, float64:
+				r["from"] = from
+			case time.Time:
+				r["from"] = from.Format(time.RFC3339)
+			case string:
+				r["from"] = from
+			}
+		}
+		if ent.To != nil {
+			switch to := ent.To.(type) {
+			case int, int16, int32, int64, float32, float64:
+				r["to"] = to
+			case time.Time:
+				r["to"] = to.Format(time.RFC3339)
+			case string:
+				r["to"] = to
+			}
+		}
+		ranges = append(ranges, r)
+	}
+	opts["ranges"] = ranges
+
+	// AggregationBuilder (SubAggregations)
+	if len(a.subAggregations) > 0 {
+		aggsMap := make(map[string]interface{})
+		source["aggregations"] = aggsMap
+		for name, aggregate := range a.subAggregations {
+			aggsMap[name] = aggregate.Source()
+		}
+	}
+
+	return source
+}

+ 96 - 0
github.com/olivere/elastic/search_aggs_significant_terms.go

@@ -0,0 +1,96 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// SignificantSignificantTermsAggregation is an aggregation that returns interesting
+// or unusual occurrences of terms in a set.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-significantterms-aggregation.html
+type SignificantTermsAggregation struct {
+	field           string
+	subAggregations map[string]Aggregation
+
+	requiredSize *int
+	shardSize    *int
+	minDocCount  *int
+}
+
+func NewSignificantTermsAggregation() SignificantTermsAggregation {
+	a := SignificantTermsAggregation{
+		subAggregations: make(map[string]Aggregation, 0),
+	}
+	return a
+}
+
+func (a SignificantTermsAggregation) Field(field string) SignificantTermsAggregation {
+	a.field = field
+	return a
+}
+
+func (a SignificantTermsAggregation) SubAggregation(name string, subAggregation Aggregation) SignificantTermsAggregation {
+	a.subAggregations[name] = subAggregation
+	return a
+}
+
+func (a SignificantTermsAggregation) RequiredSize(requiredSize int) SignificantTermsAggregation {
+	a.requiredSize = &requiredSize
+	return a
+}
+
+func (a SignificantTermsAggregation) SharedSize(shardSize int) SignificantTermsAggregation {
+	a.shardSize = &shardSize
+	return a
+}
+
+func (a SignificantTermsAggregation) MinDocCount(minDocCount int) SignificantTermsAggregation {
+	a.minDocCount = &minDocCount
+	return a
+}
+
+func (a SignificantTermsAggregation) Source() interface{} {
+	// Example:
+	// {
+	//     "query" : {
+	//         "terms" : {"force" : [ "British Transport Police" ]}
+	//     },
+	//     "aggregations" : {
+	//         "significantCrimeTypes" : {
+	//             "significant_terms" : { "field" : "crime_type" }
+	//         }
+	//     }
+	// }
+	//
+	// This method returns only the
+	//   { "significant_terms" : { "field" : "crime_type" }
+	// part.
+
+	source := make(map[string]interface{})
+	opts := make(map[string]interface{})
+	source["significant_terms"] = opts
+
+	if a.field != "" {
+		opts["field"] = a.field
+	}
+	if a.requiredSize != nil {
+		opts["size"] = *a.requiredSize // not a typo!
+	}
+	if a.shardSize != nil {
+		opts["shard_size"] = *a.shardSize
+	}
+	if a.minDocCount != nil {
+		// TODO(oe) not sure if minDocCount is a typo in ES and should be min_doc_count!
+		opts["minDocCount"] = *a.minDocCount
+	}
+
+	// AggregationBuilder (SubAggregations)
+	if len(a.subAggregations) > 0 {
+		aggsMap := make(map[string]interface{})
+		source["aggregations"] = aggsMap
+		for name, aggregate := range a.subAggregations {
+			aggsMap[name] = aggregate.Source()
+		}
+	}
+
+	return source
+}

+ 108 - 0
github.com/olivere/elastic/search_aggs_stats.go

@@ -0,0 +1,108 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// StatsAggregation is a multi-value metrics aggregation that computes stats
+// over numeric values extracted from the aggregated documents.
+// These values can be extracted either from specific numeric fields
+// in the documents, or be generated by a provided script.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-stats-aggregation.html
+type StatsAggregation struct {
+	field           string
+	script          string
+	scriptFile      string
+	lang            string
+	format          string
+	params          map[string]interface{}
+	subAggregations map[string]Aggregation
+}
+
+func NewStatsAggregation() StatsAggregation {
+	a := StatsAggregation{
+		params:          make(map[string]interface{}),
+		subAggregations: make(map[string]Aggregation),
+	}
+	return a
+}
+
+func (a StatsAggregation) Field(field string) StatsAggregation {
+	a.field = field
+	return a
+}
+
+func (a StatsAggregation) Script(script string) StatsAggregation {
+	a.script = script
+	return a
+}
+
+func (a StatsAggregation) ScriptFile(scriptFile string) StatsAggregation {
+	a.scriptFile = scriptFile
+	return a
+}
+
+func (a StatsAggregation) Lang(lang string) StatsAggregation {
+	a.lang = lang
+	return a
+}
+
+func (a StatsAggregation) Format(format string) StatsAggregation {
+	a.format = format
+	return a
+}
+
+func (a StatsAggregation) Param(name string, value interface{}) StatsAggregation {
+	a.params[name] = value
+	return a
+}
+
+func (a StatsAggregation) SubAggregation(name string, subAggregation Aggregation) StatsAggregation {
+	a.subAggregations[name] = subAggregation
+	return a
+}
+
+func (a StatsAggregation) Source() interface{} {
+	// Example:
+	//	{
+	//    "aggs" : {
+	//      "grades_stats" : { "stats" : { "field" : "grade" } }
+	//    }
+	//	}
+	// This method returns only the { "stats" : { "field" : "grade" } } part.
+
+	source := make(map[string]interface{})
+	opts := make(map[string]interface{})
+	source["stats"] = opts
+
+	// ValuesSourceAggregationBuilder
+	if a.field != "" {
+		opts["field"] = a.field
+	}
+	if a.script != "" {
+		opts["script"] = a.script
+	}
+	if a.scriptFile != "" {
+		opts["script_file"] = a.scriptFile
+	}
+	if a.lang != "" {
+		opts["lang"] = a.lang
+	}
+	if a.format != "" {
+		opts["format"] = a.format
+	}
+	if len(a.params) > 0 {
+		opts["params"] = a.params
+	}
+
+	// AggregationBuilder (SubAggregations)
+	if len(a.subAggregations) > 0 {
+		aggsMap := make(map[string]interface{})
+		source["aggregations"] = aggsMap
+		for name, aggregate := range a.subAggregations {
+			aggsMap[name] = aggregate.Source()
+		}
+	}
+
+	return source
+}

+ 108 - 0
github.com/olivere/elastic/search_aggs_sum.go

@@ -0,0 +1,108 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// SumAggregation is a single-value metrics aggregation that sums up
+// numeric values that are extracted from the aggregated documents.
+// These values can be extracted either from specific numeric fields
+// in the documents, or be generated by a provided script.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-sum-aggregation.html
+type SumAggregation struct {
+	field           string
+	script          string
+	scriptFile      string
+	lang            string
+	format          string
+	params          map[string]interface{}
+	subAggregations map[string]Aggregation
+}
+
+func NewSumAggregation() SumAggregation {
+	a := SumAggregation{
+		params:          make(map[string]interface{}),
+		subAggregations: make(map[string]Aggregation),
+	}
+	return a
+}
+
+func (a SumAggregation) Field(field string) SumAggregation {
+	a.field = field
+	return a
+}
+
+func (a SumAggregation) Script(script string) SumAggregation {
+	a.script = script
+	return a
+}
+
+func (a SumAggregation) ScriptFile(scriptFile string) SumAggregation {
+	a.scriptFile = scriptFile
+	return a
+}
+
+func (a SumAggregation) Lang(lang string) SumAggregation {
+	a.lang = lang
+	return a
+}
+
+func (a SumAggregation) Format(format string) SumAggregation {
+	a.format = format
+	return a
+}
+
+func (a SumAggregation) Param(name string, value interface{}) SumAggregation {
+	a.params[name] = value
+	return a
+}
+
+func (a SumAggregation) SubAggregation(name string, subAggregation Aggregation) SumAggregation {
+	a.subAggregations[name] = subAggregation
+	return a
+}
+
+func (a SumAggregation) Source() interface{} {
+	// Example:
+	//	{
+	//    "aggs" : {
+	//      "intraday_return" : { "sum" : { "field" : "change" } }
+	//    }
+	//	}
+	// This method returns only the { "sum" : { "field" : "change" } } part.
+
+	source := make(map[string]interface{})
+	opts := make(map[string]interface{})
+	source["sum"] = opts
+
+	// ValuesSourceAggregationBuilder
+	if a.field != "" {
+		opts["field"] = a.field
+	}
+	if a.script != "" {
+		opts["script"] = a.script
+	}
+	if a.scriptFile != "" {
+		opts["script_file"] = a.scriptFile
+	}
+	if a.lang != "" {
+		opts["lang"] = a.lang
+	}
+	if a.format != "" {
+		opts["format"] = a.format
+	}
+	if len(a.params) > 0 {
+		opts["params"] = a.params
+	}
+
+	// AggregationBuilder (SubAggregations)
+	if len(a.subAggregations) > 0 {
+		aggsMap := make(map[string]interface{})
+		source["aggregations"] = aggsMap
+		for name, aggregate := range a.subAggregations {
+			aggsMap[name] = aggregate.Source()
+		}
+	}
+
+	return source
+}

+ 339 - 0
github.com/olivere/elastic/search_aggs_terms.go

@@ -0,0 +1,339 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// TermsAggregation is a multi-bucket value source based aggregation
+// where buckets are dynamically built - one per unique value.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-bucket-terms-aggregation.html
+type TermsAggregation struct {
+	field           string
+	script          string
+	scriptFile      string
+	lang            string
+	params          map[string]interface{}
+	subAggregations map[string]Aggregation
+
+	size                  *int
+	shardSize             *int
+	requiredSize          *int
+	minDocCount           *int
+	shardMinDocCount      *int
+	valueType             string
+	order                 string
+	orderAsc              bool
+	includePattern        string
+	includeFlags          *int
+	excludePattern        string
+	excludeFlags          *int
+	executionHint         string
+	collectionMode        string
+	showTermDocCountError *bool
+	includeTerms          []string
+	excludeTerms          []string
+}
+
+func NewTermsAggregation() TermsAggregation {
+	a := TermsAggregation{
+		params:          make(map[string]interface{}),
+		subAggregations: make(map[string]Aggregation, 0),
+		includeTerms:    make([]string, 0),
+		excludeTerms:    make([]string, 0),
+	}
+	return a
+}
+
+func (a TermsAggregation) Field(field string) TermsAggregation {
+	a.field = field
+	return a
+}
+
+func (a TermsAggregation) Script(script string) TermsAggregation {
+	a.script = script
+	return a
+}
+
+func (a TermsAggregation) ScriptFile(scriptFile string) TermsAggregation {
+	a.scriptFile = scriptFile
+	return a
+}
+
+func (a TermsAggregation) Lang(lang string) TermsAggregation {
+	a.lang = lang
+	return a
+}
+
+func (a TermsAggregation) Param(name string, value interface{}) TermsAggregation {
+	a.params[name] = value
+	return a
+}
+
+func (a TermsAggregation) SubAggregation(name string, subAggregation Aggregation) TermsAggregation {
+	a.subAggregations[name] = subAggregation
+	return a
+}
+
+func (a TermsAggregation) Size(size int) TermsAggregation {
+	a.size = &size
+	return a
+}
+
+func (a TermsAggregation) RequiredSize(requiredSize int) TermsAggregation {
+	a.requiredSize = &requiredSize
+	return a
+}
+
+func (a TermsAggregation) ShardSize(shardSize int) TermsAggregation {
+	a.shardSize = &shardSize
+	return a
+}
+
+func (a TermsAggregation) MinDocCount(minDocCount int) TermsAggregation {
+	a.minDocCount = &minDocCount
+	return a
+}
+
+func (a TermsAggregation) ShardMinDocCount(shardMinDocCount int) TermsAggregation {
+	a.shardMinDocCount = &shardMinDocCount
+	return a
+}
+
+func (a TermsAggregation) Include(regexp string) TermsAggregation {
+	a.includePattern = regexp
+	return a
+}
+
+func (a TermsAggregation) IncludeWithFlags(regexp string, flags int) TermsAggregation {
+	a.includePattern = regexp
+	a.includeFlags = &flags
+	return a
+}
+
+func (a TermsAggregation) Exclude(regexp string) TermsAggregation {
+	a.excludePattern = regexp
+	return a
+}
+
+func (a TermsAggregation) ExcludeWithFlags(regexp string, flags int) TermsAggregation {
+	a.excludePattern = regexp
+	a.excludeFlags = &flags
+	return a
+}
+
+// ValueType can be string, long, or double.
+func (a TermsAggregation) ValueType(valueType string) TermsAggregation {
+	a.valueType = valueType
+	return a
+}
+
+func (a TermsAggregation) Order(order string, asc bool) TermsAggregation {
+	a.order = order
+	a.orderAsc = asc
+	return a
+}
+
+func (a TermsAggregation) OrderByCount(asc bool) TermsAggregation {
+	// "order" : { "_count" : "asc" }
+	a.order = "_count"
+	a.orderAsc = asc
+	return a
+}
+
+func (a TermsAggregation) OrderByCountAsc() TermsAggregation {
+	return a.OrderByCount(true)
+}
+
+func (a TermsAggregation) OrderByCountDesc() TermsAggregation {
+	return a.OrderByCount(false)
+}
+
+func (a TermsAggregation) OrderByTerm(asc bool) TermsAggregation {
+	// "order" : { "_term" : "asc" }
+	a.order = "_term"
+	a.orderAsc = asc
+	return a
+}
+
+func (a TermsAggregation) OrderByTermAsc() TermsAggregation {
+	return a.OrderByTerm(true)
+}
+
+func (a TermsAggregation) OrderByTermDesc() TermsAggregation {
+	return a.OrderByTerm(false)
+}
+
+// OrderByAggregation creates a bucket ordering strategy which sorts buckets
+// based on a single-valued calc get.
+func (a TermsAggregation) OrderByAggregation(aggName string, asc bool) TermsAggregation {
+	// {
+	//     "aggs" : {
+	//         "genders" : {
+	//             "terms" : {
+	//                 "field" : "gender",
+	//                 "order" : { "avg_height" : "desc" }
+	//             },
+	//             "aggs" : {
+	//                 "avg_height" : { "avg" : { "field" : "height" } }
+	//             }
+	//         }
+	//     }
+	// }
+	a.order = aggName
+	a.orderAsc = asc
+	return a
+}
+
+// OrderByAggregationAndMetric creates a bucket ordering strategy which
+// sorts buckets based on a multi-valued calc get.
+func (a TermsAggregation) OrderByAggregationAndMetric(aggName, metric string, asc bool) TermsAggregation {
+	// {
+	//     "aggs" : {
+	//         "genders" : {
+	//             "terms" : {
+	//                 "field" : "gender",
+	//                 "order" : { "height_stats.avg" : "desc" }
+	//             },
+	//             "aggs" : {
+	//                 "height_stats" : { "stats" : { "field" : "height" } }
+	//             }
+	//         }
+	//     }
+	// }
+	a.order = aggName + "." + metric
+	a.orderAsc = asc
+	return a
+}
+
+func (a TermsAggregation) ExecutionHint(hint string) TermsAggregation {
+	a.executionHint = hint
+	return a
+}
+
+// Collection mode can be depth_first or breadth_first as of 1.4.0.
+func (a TermsAggregation) CollectionMode(collectionMode string) TermsAggregation {
+	a.collectionMode = collectionMode
+	return a
+}
+
+func (a TermsAggregation) ShowTermDocCountError(showTermDocCountError bool) TermsAggregation {
+	a.showTermDocCountError = &showTermDocCountError
+	return a
+}
+
+func (a TermsAggregation) IncludeTerms(terms ...string) TermsAggregation {
+	a.includeTerms = append(a.includeTerms, terms...)
+	return a
+}
+
+func (a TermsAggregation) ExcludeTerms(terms ...string) TermsAggregation {
+	a.excludeTerms = append(a.excludeTerms, terms...)
+	return a
+}
+
+func (a TermsAggregation) Source() interface{} {
+	// Example:
+	//	{
+	//    "aggs" : {
+	//      "genders" : {
+	//        "terms" : { "field" : "gender" }
+	//      }
+	//    }
+	//	}
+	// This method returns only the { "terms" : { "field" : "gender" } } part.
+
+	source := make(map[string]interface{})
+	opts := make(map[string]interface{})
+	source["terms"] = opts
+
+	// ValuesSourceAggregationBuilder
+	if a.field != "" {
+		opts["field"] = a.field
+	}
+	if a.script != "" {
+		opts["script"] = a.script
+	}
+	if a.scriptFile != "" {
+		opts["script_file"] = a.scriptFile
+	}
+	if a.lang != "" {
+		opts["lang"] = a.lang
+	}
+	if len(a.params) > 0 {
+		opts["params"] = a.params
+	}
+
+	// AggregationBuilder (SubAggregations)
+	if len(a.subAggregations) > 0 {
+		aggsMap := make(map[string]interface{})
+		source["aggregations"] = aggsMap
+		for name, aggregate := range a.subAggregations {
+			aggsMap[name] = aggregate.Source()
+		}
+	}
+
+	// TermsBuilder
+	if a.size != nil && *a.size >= 0 {
+		opts["size"] = *a.size
+	}
+	if a.shardSize != nil && *a.shardSize >= 0 {
+		opts["shard_size"] = *a.shardSize
+	}
+	if a.requiredSize != nil && *a.requiredSize >= 0 {
+		opts["required_size"] = *a.requiredSize
+	}
+	if a.minDocCount != nil && *a.minDocCount >= 0 {
+		opts["min_doc_count"] = *a.minDocCount
+	}
+	if a.shardMinDocCount != nil && *a.shardMinDocCount >= 0 {
+		opts["shard_min_doc_count"] = *a.shardMinDocCount
+	}
+	if a.showTermDocCountError != nil {
+		opts["show_term_doc_count_error"] = *a.showTermDocCountError
+	}
+	if a.collectionMode != "" {
+		opts["collect_mode"] = a.collectionMode
+	}
+	if a.valueType != "" {
+		opts["value_type"] = a.valueType
+	}
+	if a.order != "" {
+		o := make(map[string]interface{})
+		if a.orderAsc {
+			o[a.order] = "asc"
+		} else {
+			o[a.order] = "desc"
+		}
+		opts["order"] = o
+	}
+	if len(a.includeTerms) > 0 {
+		opts["include"] = a.includeTerms
+	}
+	if a.includePattern != "" {
+		if a.includeFlags == nil || *a.includeFlags == 0 {
+			opts["include"] = a.includePattern
+		} else {
+			p := make(map[string]interface{})
+			p["pattern"] = a.includePattern
+			p["flags"] = *a.includeFlags
+			opts["include"] = p
+		}
+	}
+	if len(a.excludeTerms) > 0 {
+		opts["exclude"] = a.excludeTerms
+	}
+	if a.excludePattern != "" {
+		if a.excludeFlags == nil || *a.excludeFlags == 0 {
+			opts["exclude"] = a.excludePattern
+		} else {
+			p := make(map[string]interface{})
+			p["pattern"] = a.excludePattern
+			p["flags"] = *a.excludeFlags
+			opts["exclude"] = p
+		}
+	}
+	if a.executionHint != "" {
+		opts["execution_hint"] = a.executionHint
+	}
+	return source
+}

+ 150 - 0
github.com/olivere/elastic/search_aggs_tophits.go

@@ -0,0 +1,150 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
// TopHitsAggregation keeps track of the most relevant document
// being aggregated. This aggregator is intended to be used as a
// sub aggregator, so that the top matching documents
// can be aggregated per bucket.
//
// It can effectively be used to group result sets by certain fields via
// a bucket aggregator. One or more bucket aggregators determines by
// which properties a result set get sliced into.
//
// All option setters below are thin wrappers that delegate to the
// embedded SearchSource; each returns a copy of the aggregation so
// calls can be chained.
//
// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-top-hits-aggregation.html
type TopHitsAggregation struct {
	searchSource *SearchSource
}

// NewTopHitsAggregation initializes a TopHitsAggregation backed by a
// fresh SearchSource.
func NewTopHitsAggregation() TopHitsAggregation {
	a := TopHitsAggregation{
		searchSource: NewSearchSource(),
	}
	return a
}

// From sets the offset of the first hit to return.
func (a TopHitsAggregation) From(from int) TopHitsAggregation {
	a.searchSource = a.searchSource.From(from)
	return a
}

// Size sets the number of hits to return per bucket.
func (a TopHitsAggregation) Size(size int) TopHitsAggregation {
	a.searchSource = a.searchSource.Size(size)
	return a
}

// TrackScores toggles score tracking for the returned hits.
func (a TopHitsAggregation) TrackScores(trackScores bool) TopHitsAggregation {
	a.searchSource = a.searchSource.TrackScores(trackScores)
	return a
}

// Explain toggles returning an explanation per hit.
func (a TopHitsAggregation) Explain(explain bool) TopHitsAggregation {
	a.searchSource = a.searchSource.Explain(explain)
	return a
}

// Version toggles returning the document version per hit.
func (a TopHitsAggregation) Version(version bool) TopHitsAggregation {
	a.searchSource = a.searchSource.Version(version)
	return a
}

// NoFields indicates that no fields should be loaded per hit.
func (a TopHitsAggregation) NoFields() TopHitsAggregation {
	a.searchSource = a.searchSource.NoFields()
	return a
}

// FetchSource toggles loading of the _source field.
func (a TopHitsAggregation) FetchSource(fetchSource bool) TopHitsAggregation {
	a.searchSource = a.searchSource.FetchSource(fetchSource)
	return a
}

// FetchSourceContext sets detailed _source filtering options.
func (a TopHitsAggregation) FetchSourceContext(fetchSourceContext *FetchSourceContext) TopHitsAggregation {
	a.searchSource = a.searchSource.FetchSourceContext(fetchSourceContext)
	return a
}

// FieldDataFields sets the field data fields to load per hit.
func (a TopHitsAggregation) FieldDataFields(fieldDataFields ...string) TopHitsAggregation {
	a.searchSource = a.searchSource.FieldDataFields(fieldDataFields...)
	return a
}

// FieldDataField adds a single field data field to load per hit.
func (a TopHitsAggregation) FieldDataField(fieldDataField string) TopHitsAggregation {
	a.searchSource = a.searchSource.FieldDataField(fieldDataField)
	return a
}

// ScriptFields sets the script fields to compute per hit.
func (a TopHitsAggregation) ScriptFields(scriptFields ...*ScriptField) TopHitsAggregation {
	a.searchSource = a.searchSource.ScriptFields(scriptFields...)
	return a
}

// ScriptField adds a single script field to compute per hit.
func (a TopHitsAggregation) ScriptField(scriptField *ScriptField) TopHitsAggregation {
	a.searchSource = a.searchSource.ScriptField(scriptField)
	return a
}

// PartialFields sets the partial fields to return per hit.
func (a TopHitsAggregation) PartialFields(partialFields ...*PartialField) TopHitsAggregation {
	a.searchSource = a.searchSource.PartialFields(partialFields...)
	return a
}

// PartialField adds a single partial field to return per hit.
func (a TopHitsAggregation) PartialField(partialField *PartialField) TopHitsAggregation {
	a.searchSource = a.searchSource.PartialField(partialField)
	return a
}

// Sort adds a sort on the given field.
func (a TopHitsAggregation) Sort(field string, ascending bool) TopHitsAggregation {
	a.searchSource = a.searchSource.Sort(field, ascending)
	return a
}

// SortWithInfo adds a sort described by a SortInfo.
func (a TopHitsAggregation) SortWithInfo(info SortInfo) TopHitsAggregation {
	a.searchSource = a.searchSource.SortWithInfo(info)
	return a
}

// SortBy adds one or more Sorters.
func (a TopHitsAggregation) SortBy(sorter ...Sorter) TopHitsAggregation {
	a.searchSource = a.searchSource.SortBy(sorter...)
	return a
}

// Highlight sets highlighting options for the returned hits.
func (a TopHitsAggregation) Highlight(highlight *Highlight) TopHitsAggregation {
	a.searchSource = a.searchSource.Highlight(highlight)
	return a
}

// Highlighter returns the current highlighter of the underlying
// SearchSource.
func (a TopHitsAggregation) Highlighter() *Highlight {
	return a.searchSource.Highlighter()
}

// Source returns the JSON-serializable fragment for this aggregation.
// For a request such as
// {
//   "aggs": {
//       "top_tag_hits": {
//           "top_hits": {
//               "sort": [
//                   {
//                       "last_activity_date": {
//                           "order": "desc"
//                       }
//                   }
//               ],
//               "_source": {
//                   "include": [
//                       "title"
//                   ]
//               },
//               "size" : 1
//           }
//       }
//   }
// }
// only the { "top_hits" : { ... } } part is produced; the body is the
// serialized SearchSource.
func (a TopHitsAggregation) Source() interface{} {
	source := make(map[string]interface{})
	source["top_hits"] = a.searchSource.Source()
	return source
}

+ 111 - 0
github.com/olivere/elastic/search_aggs_value_count.go

@@ -0,0 +1,111 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// ValueCountAggregation is a single-value metrics aggregation that counts
+// the number of values that are extracted from the aggregated documents.
+// These values can be extracted either from specific fields in the documents,
+// or be generated by a provided script. Typically, this aggregator will be
+// used in conjunction with other single-value aggregations.
+// For example, when computing the avg one might be interested in the
+// number of values the average is computed over.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-aggregations-metrics-valuecount-aggregation.html
+type ValueCountAggregation struct {
+	field           string
+	script          string
+	scriptFile      string
+	lang            string
+	format          string
+	params          map[string]interface{}
+	subAggregations map[string]Aggregation
+}
+
+func NewValueCountAggregation() ValueCountAggregation {
+	a := ValueCountAggregation{
+		params:          make(map[string]interface{}),
+		subAggregations: make(map[string]Aggregation),
+	}
+	return a
+}
+
+func (a ValueCountAggregation) Field(field string) ValueCountAggregation {
+	a.field = field
+	return a
+}
+
+func (a ValueCountAggregation) Script(script string) ValueCountAggregation {
+	a.script = script
+	return a
+}
+
+func (a ValueCountAggregation) ScriptFile(scriptFile string) ValueCountAggregation {
+	a.scriptFile = scriptFile
+	return a
+}
+
+func (a ValueCountAggregation) Lang(lang string) ValueCountAggregation {
+	a.lang = lang
+	return a
+}
+
+func (a ValueCountAggregation) Format(format string) ValueCountAggregation {
+	a.format = format
+	return a
+}
+
+func (a ValueCountAggregation) Param(name string, value interface{}) ValueCountAggregation {
+	a.params[name] = value
+	return a
+}
+
+func (a ValueCountAggregation) SubAggregation(name string, subAggregation Aggregation) ValueCountAggregation {
+	a.subAggregations[name] = subAggregation
+	return a
+}
+
+func (a ValueCountAggregation) Source() interface{} {
+	// Example:
+	//	{
+	//    "aggs" : {
+	//      "grades_count" : { "value_count" : { "field" : "grade" } }
+	//    }
+	//	}
+	// This method returns only the { "value_count" : { "field" : "grade" } } part.
+
+	source := make(map[string]interface{})
+	opts := make(map[string]interface{})
+	source["value_count"] = opts
+
+	// ValuesSourceAggregationBuilder
+	if a.field != "" {
+		opts["field"] = a.field
+	}
+	if a.script != "" {
+		opts["script"] = a.script
+	}
+	if a.scriptFile != "" {
+		opts["script_file"] = a.scriptFile
+	}
+	if a.lang != "" {
+		opts["lang"] = a.lang
+	}
+	if a.format != "" {
+		opts["format"] = a.format
+	}
+	if len(a.params) > 0 {
+		opts["params"] = a.params
+	}
+
+	// AggregationBuilder (SubAggregations)
+	if len(a.subAggregations) > 0 {
+		aggsMap := make(map[string]interface{})
+		source["aggregations"] = aggsMap
+		for name, aggregate := range a.subAggregations {
+			aggsMap[name] = aggregate.Source()
+		}
+	}
+
+	return source
+}

+ 12 - 0
github.com/olivere/elastic/search_facets.go

@@ -0,0 +1,12 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// Facet represents a glimpse into the data, computed alongside the
+// search hits.
+// For more details about facets, visit:
+// http://elasticsearch.org/guide/reference/api/search/facets/
+type Facet interface {
+	// Source returns the JSON-serializable request body of the facet.
+	Source() interface{}
+}

+ 198 - 0
github.com/olivere/elastic/search_facets_date_histogram.go

@@ -0,0 +1,198 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// A specific histogram facet that can work with date field types
+// enhancing it over the regular histogram facet.
+// See:
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-facets-date-histogram-facet.html
+type DateHistogramFacet struct {
+	facetFilter                Filter
+	global                     *bool
+	nested                     string
+	mode                       string
+	keyField                   string
+	valueField                 *string
+	interval                   string
+	preZone                    string
+	preZoneAdjustLargeInterval *bool
+	postZone                   string
+	preOffset                  string
+	postOffset                 string
+	factor                     *float32
+	comparatorType             string
+	valueScript                string
+	params                     map[string]interface{}
+	lang                       string
+}
+
+// NewDateHistogramFacet creates a new date histogram facet with an
+// empty script parameter map; all other options start at zero values.
+func NewDateHistogramFacet() DateHistogramFacet {
+	return DateHistogramFacet{
+		params: make(map[string]interface{}),
+	}
+}
+
+// FacetFilter sets a filter restricting the documents the facet is
+// computed on.
+// NOTE(review): the parameter type used to be Facet although the field
+// is a Filter; that only compiled because both interfaces declare the
+// same single Source() method. Narrowed to Filter for type
+// correctness; the method sets are identical, so callers are
+// unaffected.
+func (f DateHistogramFacet) FacetFilter(filter Filter) DateHistogramFacet {
+	f.facetFilter = filter
+	return f
+}
+
+// Global, when true, computes the facet across all documents rather
+// than just those matching the query.
+func (f DateHistogramFacet) Global(global bool) DateHistogramFacet {
+	f.global = &global
+	return f
+}
+
+// Nested sets the nested path when faceting on nested documents.
+func (f DateHistogramFacet) Nested(nested string) DateHistogramFacet {
+	f.nested = nested
+	return f
+}
+
+// Mode sets the facet mode.
+func (f DateHistogramFacet) Mode(mode string) DateHistogramFacet {
+	f.mode = mode
+	return f
+}
+
+// Field sets the (key) field the histogram is computed on.
+func (f DateHistogramFacet) Field(field string) DateHistogramFacet {
+	f.keyField = field
+	return f
+}
+
+// KeyField is an alias of Field.
+func (f DateHistogramFacet) KeyField(keyField string) DateHistogramFacet {
+	f.keyField = keyField
+	return f
+}
+
+// ValueField sets a separate field used for aggregated data per bucket.
+func (f DateHistogramFacet) ValueField(valueField string) DateHistogramFacet {
+	f.valueField = &valueField
+	return f
+}
+
+// ValueScript sets a script that computes the value per bucket.
+func (f DateHistogramFacet) ValueScript(valueScript string) DateHistogramFacet {
+	f.valueScript = valueScript
+	return f
+}
+
+// Param adds a named parameter for the value script.
+// The params map is shared between copies of the facet because maps
+// are reference types in Go.
+func (f DateHistogramFacet) Param(name string, value interface{}) DateHistogramFacet {
+	f.params[name] = value
+	return f
+}
+
+// Lang sets the language of the value script.
+func (f DateHistogramFacet) Lang(lang string) DateHistogramFacet {
+	f.lang = lang
+	return f
+}
+
+// Interval sets the histogram interval.
+// Allowed values are: "year", "quarter", "month", "week", "day",
+// "hour", "minute". It also supports time settings like "1.5h"
+// (up to "w" for weeks).
+func (f DateHistogramFacet) Interval(interval string) DateHistogramFacet {
+	f.interval = interval
+	return f
+}
+
+// PreZoneAdjustLargeInterval applies the pre-zone adjustment to
+// intervals of a day and larger as well (see the Elasticsearch
+// date histogram facet documentation).
+func (f DateHistogramFacet) PreZoneAdjustLargeInterval(preZoneAdjustLargeInterval bool) DateHistogramFacet {
+	f.preZoneAdjustLargeInterval = &preZoneAdjustLargeInterval
+	return f
+}
+
+// PreZone sets the time zone applied before rounding into buckets.
+func (f DateHistogramFacet) PreZone(preZone string) DateHistogramFacet {
+	f.preZone = preZone
+	return f
+}
+
+// PostZone sets the time zone applied after rounding into buckets.
+func (f DateHistogramFacet) PostZone(postZone string) DateHistogramFacet {
+	f.postZone = postZone
+	return f
+}
+
+// PreOffset sets an offset applied before rounding.
+func (f DateHistogramFacet) PreOffset(preOffset string) DateHistogramFacet {
+	f.preOffset = preOffset
+	return f
+}
+
+// PostOffset sets an offset applied after rounding.
+func (f DateHistogramFacet) PostOffset(postOffset string) DateHistogramFacet {
+	f.postOffset = postOffset
+	return f
+}
+
+// Factor sets a factor by which key values are scaled before rounding
+// (see the Elasticsearch date histogram facet documentation).
+func (f DateHistogramFacet) Factor(factor float32) DateHistogramFacet {
+	f.factor = &factor
+	return f
+}
+
+// Comparator sets how the returned buckets are ordered.
+func (f DateHistogramFacet) Comparator(comparator string) DateHistogramFacet {
+	f.comparatorType = comparator
+	return f
+}
+
+// addFilterFacetAndGlobal writes the options shared by all facet types
+// (facet_filter, nested, global, mode) into the given request body.
+func (f DateHistogramFacet) addFilterFacetAndGlobal(source map[string]interface{}) {
+	if f.facetFilter != nil {
+		source["facet_filter"] = f.facetFilter.Source()
+	}
+	if f.nested != "" {
+		source["nested"] = f.nested
+	}
+	if f.global != nil {
+		source["global"] = *f.global
+	}
+	if f.mode != "" {
+		source["mode"] = f.mode
+	}
+}
+
+// Source returns the JSON-serializable body of the facet.
+func (f DateHistogramFacet) Source() interface{} {
+	/*
+			"histo1" : {
+		    "date_histogram" : {
+		        "field" : "field_name",
+		        "interval" : "day"
+		    }
+		  }
+	*/
+	source := make(map[string]interface{})
+	f.addFilterFacetAndGlobal(source)
+	facet := make(map[string]interface{})
+	source["date_histogram"] = facet
+
+	// With a value field, the key_field/value_field form is used;
+	// otherwise the single-field form.
+	if f.valueField != nil {
+		facet["key_field"] = f.keyField
+		facet["value_field"] = *f.valueField
+	} else {
+		facet["field"] = f.keyField
+	}
+
+	if f.valueScript != "" {
+		facet["value_script"] = f.valueScript
+		if f.lang != "" {
+			facet["lang"] = f.lang
+		}
+		if len(f.params) > 0 {
+			facet["params"] = f.params
+		}
+	}
+	// Interval is emitted unconditionally; if Interval was never called
+	// this sends an empty string — presumably callers always set it.
+	facet["interval"] = f.interval
+	if f.preZone != "" {
+		facet["pre_zone"] = f.preZone
+	}
+	if f.preZoneAdjustLargeInterval != nil {
+		facet["pre_zone_adjust_large_interval"] = *f.preZoneAdjustLargeInterval
+	}
+	if f.postZone != "" {
+		facet["post_zone"] = f.postZone
+	}
+	if f.preOffset != "" {
+		facet["pre_offset"] = f.preOffset
+	}
+	if f.postOffset != "" {
+		facet["post_offset"] = f.postOffset
+	}
+	if f.factor != nil {
+		facet["factor"] = *f.factor
+	}
+	if f.comparatorType != "" {
+		facet["comparator"] = f.comparatorType
+	}
+	return source
+}

+ 68 - 0
github.com/olivere/elastic/search_facets_filter.go

@@ -0,0 +1,68 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// A filter facet (not to be confused with a facet filter) allows you
+// to return a count of the hits matching the filter.
+// The filter itself can be expressed using the Query DSL.
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-facets-filter-facet.html
+type FilterFacet struct {
+	facetFilter Filter
+	global      *bool
+	nested      string
+	mode        string
+	filter      Filter
+}
+
+// NewFilterFacet creates a new filter facet with all options unset.
+func NewFilterFacet() FilterFacet {
+	return FilterFacet{}
+}
+
+// FacetFilter sets a filter restricting the documents the facet is
+// computed on (not to be confused with Filter, which is what is
+// counted).
+// NOTE(review): the parameter type used to be Facet although the field
+// is a Filter; both interfaces declare the same single Source()
+// method, so narrowing to Filter is type-correct and
+// caller-compatible.
+func (f FilterFacet) FacetFilter(filter Filter) FilterFacet {
+	f.facetFilter = filter
+	return f
+}
+
+// Global, when true, computes the facet across all documents rather
+// than just those matching the query.
+func (f FilterFacet) Global(global bool) FilterFacet {
+	f.global = &global
+	return f
+}
+
+// Nested sets the nested path when faceting on nested documents.
+func (f FilterFacet) Nested(nested string) FilterFacet {
+	f.nested = nested
+	return f
+}
+
+// Mode sets the facet mode.
+func (f FilterFacet) Mode(mode string) FilterFacet {
+	f.mode = mode
+	return f
+}
+
+// Filter sets the filter whose matching documents are counted.
+func (f FilterFacet) Filter(filter Filter) FilterFacet {
+	f.filter = filter
+	return f
+}
+
+// addFilterFacetAndGlobal writes the options shared by all facet types
+// (facet_filter, nested, global, mode) into the given request body.
+func (f FilterFacet) addFilterFacetAndGlobal(source map[string]interface{}) {
+	if f.facetFilter != nil {
+		source["facet_filter"] = f.facetFilter.Source()
+	}
+	if f.nested != "" {
+		source["nested"] = f.nested
+	}
+	if f.global != nil {
+		source["global"] = *f.global
+	}
+	if f.mode != "" {
+		source["mode"] = f.mode
+	}
+}
+
+// Source returns the JSON-serializable body of the facet.
+// Note that it panics (nil interface method call) if Filter was never
+// called — the filter is mandatory for this facet type.
+func (f FilterFacet) Source() interface{} {
+	source := make(map[string]interface{})
+	f.addFilterFacetAndGlobal(source)
+	source["filter"] = f.filter.Source()
+	return source
+}

+ 202 - 0
github.com/olivere/elastic/search_facets_geo_distance.go

@@ -0,0 +1,202 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// The geo_distance facet is a facet providing information for ranges of
+// distances from a provided geo_point including count of the number of hits
+// that fall within each range, and aggregation information (like total).
+// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-facets-geo-distance-facet.html
+type GeoDistanceFacet struct {
+	facetFilter    Filter
+	global         *bool
+	nested         string
+	mode           string
+	fieldName      string
+	valueFieldName string
+	lat            float64
+	lon            float64
+	geoHash        string
+	geoDistance    string
+	unit           string
+	params         map[string]interface{}
+	valueScript    string
+	lang           string
+	entries        []geoDistanceFacetEntry
+}
+
+// NewGeoDistanceFacet creates a new geo distance facet with an empty
+// script parameter map and no distance ranges.
+func NewGeoDistanceFacet() GeoDistanceFacet {
+	return GeoDistanceFacet{
+		params:  make(map[string]interface{}),
+		entries: make([]geoDistanceFacetEntry, 0),
+	}
+}
+
+// FacetFilter sets a filter restricting the documents the facet is
+// computed on.
+// NOTE(review): the parameter type used to be Facet although the field
+// is a Filter; both interfaces declare the same single Source()
+// method, so narrowing to Filter is type-correct and
+// caller-compatible.
+func (f GeoDistanceFacet) FacetFilter(filter Filter) GeoDistanceFacet {
+	f.facetFilter = filter
+	return f
+}
+
+// Global, when true, computes the facet across all documents rather
+// than just those matching the query.
+func (f GeoDistanceFacet) Global(global bool) GeoDistanceFacet {
+	f.global = &global
+	return f
+}
+
+// Nested sets the nested path when faceting on nested documents.
+func (f GeoDistanceFacet) Nested(nested string) GeoDistanceFacet {
+	f.nested = nested
+	return f
+}
+
+// Mode sets the facet mode.
+func (f GeoDistanceFacet) Mode(mode string) GeoDistanceFacet {
+	f.mode = mode
+	return f
+}
+
+// Field sets the geo_point field distances are measured from.
+func (f GeoDistanceFacet) Field(fieldName string) GeoDistanceFacet {
+	f.fieldName = fieldName
+	return f
+}
+
+// ValueField sets a separate field used for aggregated data per range.
+func (f GeoDistanceFacet) ValueField(valueFieldName string) GeoDistanceFacet {
+	f.valueFieldName = valueFieldName
+	return f
+}
+
+// ValueScript sets a script computing the value per range.
+func (f GeoDistanceFacet) ValueScript(valueScript string) GeoDistanceFacet {
+	f.valueScript = valueScript
+	return f
+}
+
+// Lang sets the language of the value script.
+func (f GeoDistanceFacet) Lang(lang string) GeoDistanceFacet {
+	f.lang = lang
+	return f
+}
+
+// ScriptParam adds a named parameter for the value script.
+// The params map is shared between copies of the facet because maps
+// are reference types in Go.
+func (f GeoDistanceFacet) ScriptParam(name string, value interface{}) GeoDistanceFacet {
+	f.params[name] = value
+	return f
+}
+
+// Point sets the origin as latitude and longitude.
+func (f GeoDistanceFacet) Point(lat, lon float64) GeoDistanceFacet {
+	f.lat = lat
+	f.lon = lon
+	return f
+}
+
+// Lat sets the latitude of the origin.
+func (f GeoDistanceFacet) Lat(lat float64) GeoDistanceFacet {
+	f.lat = lat
+	return f
+}
+
+// Lon sets the longitude of the origin.
+func (f GeoDistanceFacet) Lon(lon float64) GeoDistanceFacet {
+	f.lon = lon
+	return f
+}
+
+// GeoHash sets the origin as a geohash; it takes precedence over
+// Point/Lat/Lon when serializing (see Source).
+func (f GeoDistanceFacet) GeoHash(geoHash string) GeoDistanceFacet {
+	f.geoHash = geoHash
+	return f
+}
+
+// GeoDistance sets how distances are computed; serialized as
+// distance_type (e.g. "arc" or "plane", per the Elasticsearch docs).
+func (f GeoDistanceFacet) GeoDistance(geoDistance string) GeoDistanceFacet {
+	f.geoDistance = geoDistance
+	return f
+}
+
+// AddRange adds a bounded distance range [from, to].
+func (f GeoDistanceFacet) AddRange(from, to float64) GeoDistanceFacet {
+	f.entries = append(f.entries, geoDistanceFacetEntry{From: from, To: to})
+	return f
+}
+
+// AddUnboundedTo adds a range with a lower bound only.
+func (f GeoDistanceFacet) AddUnboundedTo(from float64) GeoDistanceFacet {
+	f.entries = append(f.entries, geoDistanceFacetEntry{From: from, To: nil})
+	return f
+}
+
+// AddUnboundedFrom adds a range with an upper bound only.
+func (f GeoDistanceFacet) AddUnboundedFrom(to float64) GeoDistanceFacet {
+	f.entries = append(f.entries, geoDistanceFacetEntry{From: nil, To: to})
+	return f
+}
+
+// Unit sets the distance unit (e.g. "km", "mi").
+func (f GeoDistanceFacet) Unit(distanceUnit string) GeoDistanceFacet {
+	f.unit = distanceUnit
+	return f
+}
+
+// addFilterFacetAndGlobal writes the options shared by all facet types
+// (facet_filter, nested, global, mode) into the given request body.
+func (f GeoDistanceFacet) addFilterFacetAndGlobal(source map[string]interface{}) {
+	if f.facetFilter != nil {
+		source["facet_filter"] = f.facetFilter.Source()
+	}
+	if f.nested != "" {
+		source["nested"] = f.nested
+	}
+	if f.global != nil {
+		source["global"] = *f.global
+	}
+	if f.mode != "" {
+		source["mode"] = f.mode
+	}
+}
+
+// Source returns the JSON-serializable body of the facet.
+// Note that fieldName is used as the JSON key of the origin, so Field
+// must have been called for a valid request — otherwise the key is the
+// empty string.
+func (f GeoDistanceFacet) Source() interface{} {
+	source := make(map[string]interface{})
+	f.addFilterFacetAndGlobal(source)
+	opts := make(map[string]interface{})
+	source["geo_distance"] = opts
+
+	// A geohash origin takes precedence over lat/lon.
+	if f.geoHash != "" {
+		opts[f.fieldName] = f.geoHash
+	} else {
+		opts[f.fieldName] = []float64{f.lat, f.lon}
+	}
+	if f.valueFieldName != "" {
+		opts["value_field"] = f.valueFieldName
+	}
+	if f.valueScript != "" {
+		opts["value_script"] = f.valueScript
+		if f.lang != "" {
+			opts["lang"] = f.lang
+		}
+		if len(f.params) > 0 {
+			opts["params"] = f.params
+		}
+	}
+
+	// The AddRange/AddUnbounded* methods only ever store float64 or
+	// nil, so the extra numeric/string cases below are defensive; any
+	// other type would be silently dropped from the range.
+	ranges := make([]interface{}, 0)
+	for _, ent := range f.entries {
+		r := make(map[string]interface{})
+		if ent.From != nil {
+			switch from := ent.From.(type) {
+			case int, int16, int32, int64, float32, float64:
+				r["from"] = from
+			case string:
+				r["from"] = from
+			}
+		}
+		if ent.To != nil {
+			switch to := ent.To.(type) {
+			case int, int16, int32, int64, float32, float64:
+				r["to"] = to
+			case string:
+				r["to"] = to
+			}
+		}
+		ranges = append(ranges, r)
+	}
+	opts["ranges"] = ranges
+
+	if f.unit != "" {
+		opts["unit"] = f.unit
+	}
+	if f.geoDistance != "" {
+		opts["distance_type"] = f.geoDistance
+	}
+
+	return source
+}
+
+// geoDistanceFacetEntry is a single distance range; From and To hold
+// float64 values, or nil for an open bound.
+type geoDistanceFacetEntry struct {
+	From interface{}
+	To   interface{}
+}

+ 110 - 0
github.com/olivere/elastic/search_facets_histogram.go

@@ -0,0 +1,110 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// Histogram Facet
+// See: http://www.elasticsearch.org/guide/reference/api/search/facets/histogram-facet.html
+type HistogramFacet struct {
+	facetFilter    Filter
+	global         *bool
+	nested         string
+	mode           string
+	keyField       string
+	valueField     string
+	interval       int64
+	timeInterval   string
+	comparatorType string
+}
+
+// NewHistogramFacet creates a new histogram facet. The interval starts
+// at -1, i.e. unset.
+func NewHistogramFacet() HistogramFacet {
+	return HistogramFacet{
+		interval: -1,
+	}
+}
+
+// FacetFilter sets a filter restricting the documents the facet is
+// computed on.
+// NOTE(review): the parameter type used to be Facet although the field
+// is a Filter; both interfaces declare the same single Source()
+// method, so narrowing to Filter is type-correct and
+// caller-compatible.
+func (f HistogramFacet) FacetFilter(filter Filter) HistogramFacet {
+	f.facetFilter = filter
+	return f
+}
+
+// Global, when true, computes the facet across all documents rather
+// than just those matching the query.
+func (f HistogramFacet) Global(global bool) HistogramFacet {
+	f.global = &global
+	return f
+}
+
+// Nested sets the nested path when faceting on nested documents.
+func (f HistogramFacet) Nested(nested string) HistogramFacet {
+	f.nested = nested
+	return f
+}
+
+// Mode sets the facet mode.
+func (f HistogramFacet) Mode(mode string) HistogramFacet {
+	f.mode = mode
+	return f
+}
+
+// Field sets the (key) field the histogram is computed on.
+func (f HistogramFacet) Field(field string) HistogramFacet {
+	f.keyField = field
+	return f
+}
+
+// KeyField is an alias of Field.
+func (f HistogramFacet) KeyField(keyField string) HistogramFacet {
+	f.keyField = keyField
+	return f
+}
+
+// ValueField sets a separate field used for aggregated data per bucket.
+func (f HistogramFacet) ValueField(valueField string) HistogramFacet {
+	f.valueField = valueField
+	return f
+}
+
+// Interval sets the numeric bucket interval.
+func (f HistogramFacet) Interval(interval int64) HistogramFacet {
+	f.interval = interval
+	return f
+}
+
+// TimeInterval sets a time-based interval; it takes precedence over
+// Interval when serializing (see Source).
+func (f HistogramFacet) TimeInterval(timeInterval string) HistogramFacet {
+	f.timeInterval = timeInterval
+	return f
+}
+
+// Comparator sets how the returned buckets are ordered.
+// NOTE(review): added so the existing comparatorType field — which
+// Source already serializes — can actually be set; mirrors
+// HistogramScriptFacet.Comparator.
+func (f HistogramFacet) Comparator(comparatorType string) HistogramFacet {
+	f.comparatorType = comparatorType
+	return f
+}
+
+// addFilterFacetAndGlobal writes the options shared by all facet types
+// (facet_filter, nested, global, mode) into the given request body.
+func (f HistogramFacet) addFilterFacetAndGlobal(source map[string]interface{}) {
+	if f.facetFilter != nil {
+		source["facet_filter"] = f.facetFilter.Source()
+	}
+	if f.nested != "" {
+		source["nested"] = f.nested
+	}
+	if f.global != nil {
+		source["global"] = *f.global
+	}
+	if f.mode != "" {
+		source["mode"] = f.mode
+	}
+}
+
+// Source returns the JSON-serializable body of the facet.
+func (f HistogramFacet) Source() interface{} {
+	source := make(map[string]interface{})
+	f.addFilterFacetAndGlobal(source)
+	opts := make(map[string]interface{})
+	source["histogram"] = opts
+
+	// With a value field, the key_field/value_field form is used;
+	// otherwise the single-field form.
+	if f.valueField != "" {
+		opts["key_field"] = f.keyField
+		opts["value_field"] = f.valueField
+	} else {
+		opts["field"] = f.keyField
+	}
+	// A time interval takes precedence; otherwise the numeric interval
+	// is emitted as-is (-1 if Interval was never called — see
+	// NewHistogramFacet).
+	if f.timeInterval != "" {
+		opts["time_interval"] = f.timeInterval
+	} else {
+		opts["interval"] = f.interval
+	}
+
+	if f.comparatorType != "" {
+		opts["comparator"] = f.comparatorType
+	}
+
+	return source
+}

+ 120 - 0
github.com/olivere/elastic/search_facets_histogram_script.go

@@ -0,0 +1,120 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// Histogram Facet
+// See: http://www.elasticsearch.org/guide/reference/api/search/facets/histogram-facet.html
+type HistogramScriptFacet struct {
+	facetFilter    Filter
+	global         *bool
+	nested         string
+	mode           string
+	lang           string
+	keyField       string
+	keyScript      string
+	valueScript    string
+	params         map[string]interface{}
+	interval       int64
+	comparatorType string
+}
+
+// NewHistogramScriptFacet creates a new script-based histogram facet
+// with an empty parameter map; the interval starts at -1, i.e. unset.
+func NewHistogramScriptFacet() HistogramScriptFacet {
+	return HistogramScriptFacet{
+		interval: -1,
+		params:   make(map[string]interface{}),
+	}
+}
+
+// FacetFilter sets a filter restricting the documents the facet is
+// computed on.
+// NOTE(review): the parameter type used to be Facet although the field
+// is a Filter; both interfaces declare the same single Source()
+// method, so narrowing to Filter is type-correct and
+// caller-compatible.
+func (f HistogramScriptFacet) FacetFilter(filter Filter) HistogramScriptFacet {
+	f.facetFilter = filter
+	return f
+}
+
+// Global, when true, computes the facet across all documents rather
+// than just those matching the query.
+func (f HistogramScriptFacet) Global(global bool) HistogramScriptFacet {
+	f.global = &global
+	return f
+}
+
+// Nested sets the nested path when faceting on nested documents.
+func (f HistogramScriptFacet) Nested(nested string) HistogramScriptFacet {
+	f.nested = nested
+	return f
+}
+
+// Mode sets the facet mode.
+func (f HistogramScriptFacet) Mode(mode string) HistogramScriptFacet {
+	f.mode = mode
+	return f
+}
+
+// KeyField sets the field used to bucket documents; it takes
+// precedence over KeyScript when serializing (see Source).
+func (f HistogramScriptFacet) KeyField(keyField string) HistogramScriptFacet {
+	f.keyField = keyField
+	return f
+}
+
+// KeyScript sets a script computing the bucket key.
+func (f HistogramScriptFacet) KeyScript(keyScript string) HistogramScriptFacet {
+	f.keyScript = keyScript
+	return f
+}
+
+// ValueScript sets the script computing the value per bucket.
+func (f HistogramScriptFacet) ValueScript(valueScript string) HistogramScriptFacet {
+	f.valueScript = valueScript
+	return f
+}
+
+// Lang sets the script language.
+// NOTE(review): added so the existing lang field — which Source
+// already serializes — can actually be set; mirrors
+// DateHistogramFacet.Lang.
+func (f HistogramScriptFacet) Lang(lang string) HistogramScriptFacet {
+	f.lang = lang
+	return f
+}
+
+// Interval sets the numeric bucket interval.
+func (f HistogramScriptFacet) Interval(interval int64) HistogramScriptFacet {
+	f.interval = interval
+	return f
+}
+
+// Param adds a named script parameter.
+// The params map is shared between copies of the facet because maps
+// are reference types in Go.
+func (f HistogramScriptFacet) Param(name string, value interface{}) HistogramScriptFacet {
+	f.params[name] = value
+	return f
+}
+
+// Comparator sets how the returned buckets are ordered.
+func (f HistogramScriptFacet) Comparator(comparatorType string) HistogramScriptFacet {
+	f.comparatorType = comparatorType
+	return f
+}
+
+// addFilterFacetAndGlobal writes the options shared by all facet types
+// (facet_filter, nested, global, mode) into the given request body.
+func (f HistogramScriptFacet) addFilterFacetAndGlobal(source map[string]interface{}) {
+	if f.facetFilter != nil {
+		source["facet_filter"] = f.facetFilter.Source()
+	}
+	if f.nested != "" {
+		source["nested"] = f.nested
+	}
+	if f.global != nil {
+		source["global"] = *f.global
+	}
+	if f.mode != "" {
+		source["mode"] = f.mode
+	}
+}
+
+// Source returns the JSON-serializable body of the facet.
+func (f HistogramScriptFacet) Source() interface{} {
+	source := make(map[string]interface{})
+	f.addFilterFacetAndGlobal(source)
+	opts := make(map[string]interface{})
+	source["histogram"] = opts
+
+	// A key field takes precedence over a key script.
+	if f.keyField != "" {
+		opts["key_field"] = f.keyField
+	} else if f.keyScript != "" {
+		opts["key_script"] = f.keyScript
+	}
+	opts["value_script"] = f.valueScript
+	if f.lang != "" {
+		opts["lang"] = f.lang
+	}
+	// Unlike HistogramFacet, an unset interval (-1) is skipped here.
+	if f.interval > 0 {
+		opts["interval"] = f.interval
+	}
+	if len(f.params) > 0 {
+		opts["params"] = f.params
+	}
+	if f.comparatorType != "" {
+		opts["comparator"] = f.comparatorType
+	}
+	return source
+}

+ 66 - 0
github.com/olivere/elastic/search_facets_query.go

@@ -0,0 +1,66 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// Query Facet
+// See: http://www.elasticsearch.org/guide/reference/api/search/facets/query-facet.html
+type QueryFacet struct {
+	facetFilter Filter
+	global      *bool
+	nested      string
+	mode        string
+	query       Query
+}
+
+// NewQueryFacet creates a new query facet with all options unset.
+func NewQueryFacet() QueryFacet {
+	return QueryFacet{}
+}
+
+// FacetFilter sets a filter restricting the documents the facet is
+// computed on.
+// NOTE(review): the parameter type used to be Facet although the field
+// is a Filter; both interfaces declare the same single Source()
+// method, so narrowing to Filter is type-correct and
+// caller-compatible.
+func (f QueryFacet) FacetFilter(filter Filter) QueryFacet {
+	f.facetFilter = filter
+	return f
+}
+
+// Global, when true, computes the facet across all documents rather
+// than just those matching the query.
+func (f QueryFacet) Global(global bool) QueryFacet {
+	f.global = &global
+	return f
+}
+
+// Nested sets the nested path when faceting on nested documents.
+func (f QueryFacet) Nested(nested string) QueryFacet {
+	f.nested = nested
+	return f
+}
+
+// Mode sets the facet mode.
+func (f QueryFacet) Mode(mode string) QueryFacet {
+	f.mode = mode
+	return f
+}
+
+// Query sets the query whose matching documents are counted.
+func (f QueryFacet) Query(query Query) QueryFacet {
+	f.query = query
+	return f
+}
+
+// addFilterFacetAndGlobal writes the options shared by all facet types
+// (facet_filter, nested, global, mode) into the given request body.
+func (f QueryFacet) addFilterFacetAndGlobal(source map[string]interface{}) {
+	if f.facetFilter != nil {
+		source["facet_filter"] = f.facetFilter.Source()
+	}
+	if f.nested != "" {
+		source["nested"] = f.nested
+	}
+	if f.global != nil {
+		source["global"] = *f.global
+	}
+	if f.mode != "" {
+		source["mode"] = f.mode
+	}
+}
+
+// Source returns the JSON-serializable body of the facet.
+// Note that it panics (nil interface method call) if Query was never
+// called — the query is mandatory for this facet type.
+func (f QueryFacet) Source() interface{} {
+	source := make(map[string]interface{})
+	f.addFilterFacetAndGlobal(source)
+	source["query"] = f.query.Source()
+	return source
+}

Niektoré súbory nie sú zobrazené, pretože je v týchto rozdielových dátach zmenené mnoho súborov