@@ -0,0 +1,48 @@
+{
+ "task": [
+ {
+ "cron": "0 51 8,10,12,14,16,18,20,22,0,2,4,6 ? * ?",
+ "tjscope": "-3,h",
+ "min": 100,
+ "max": 50000,
+ "type": "alert",
+ "name": "es工作日预警3小时内范围100~4万"
+ },
+ "cron": "0 18 12,18 ? * 7,1",
+ "tjscope": "-6,h",
+ "min": 50,
+ "max": 100000,
+ "name": "es周末预警6小时内范围50~10万"
+ "cron": "0 00 10 * * ?",
+ "tjscope": "-1,d",
+ "type": "report",
+ "name": "es日报",
+ "mgo": "172.17.4.187:27082,172.17.145.163:27083|SJZY_Rbid_ProG|SJZY@8Pro3gR79aM|qfw|bidding"
+ "cron": "30 5 9 ? * SAT",
+ "tjscope": "-7,d",
+ "name": "es周报",
+ }
+ ],
+ "esAddr": "http://172.17.145.170:9800",
+ "esIndex": "bidding",
+ "esAddr2": "http://172.17.4.184:19805",
+ "esIndex2": "bidding",
+ "username": "es_all",
+ "password": "TopJkO2E_d1x",
+ "esAddr3": "http://172.17.4.184:19905",
+ "esIndex3": "bidding",
+ "username3": "jybid",
+ "password3": "Top2023_JEB01i@31",
+ "jkmail": {
+ "to":"renzheng@topnet.net.cn,zhangjinkun@topnet.net.cn,maxiaoshan@topnet.net.cn,wangjianghan@topnet.net.cn,chenmingzhu@jianyu360.com,zhengkun@topnet.net.cn,dongzhaorui@topnet.net.cn,lizongze@topnet.net.cn,wangchengcheng@topnet.net.cn",
+ "api": "http://172.17.145.179:19281/_send/_mail"
+}
@@ -0,0 +1,27 @@
+# How to contribute
+
+Elastic is an open-source project and we welcome every
+contribution.
+## Your Pull Request
+To make it easy to review and understand your changes, please keep the
+following things in mind before submitting your pull request:
+* Work on the latest possible state of `olivere/elastic`.
+* Create a branch dedicated to your change.
+* If possible, write a test case which confirms your change.
+* Make sure your changes and your tests work with all recent versions of
+ Elasticsearch. At the moment, we're targeting the current and the previous
+ release, e.g. the 1.4 and the 1.3 branch.
+* Test your changes before creating a pull request (`go test ./...`).
+* Don't mix several features or bug fixes in one pull request.
+* Create a meaningful commit message.
+* Explain your change, e.g. provide a link to the issue you are fixing and
+ probably a link to the Elasticsearch documentation and/or source code.
+* Format your source with `go fmt`.
+## Additional Resources
+* [GitHub documentation](http://help.github.com/)
+* [GitHub pull request documentation](http://help.github.com/send-pull-requests/)
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+Copyright © 2012-2015 Oliver Eilhard
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the “Software”), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+The above copyright notice and this permission notice shall be included
+in all copies or substantial portions of the Software.
+THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+IN THE SOFTWARE.
@@ -0,0 +1,388 @@
+# Elastic
+Elastic is an [Elasticsearch](http://www.elasticsearch.org/) client for the
+[Go](http://www.golang.org/) programming language.
+[](https://travis-ci.org/olivere/elastic)
+[](https://godoc.org/github.com/olivere/elastic)
+[](https://raw.githubusercontent.com/olivere/elastic/master/LICENSE)
+See the [wiki](https://github.com/olivere/elastic/wiki) for additional information about Elastic.
+## Releases
+### Current version
+This is the source code of the current version of Elastic (version 2).
+### Earlier versions
+If you came from an earlier version and found that you cannot update, don't
+worry. Earlier versions are still available. All you need to do is go-get
+them and change your import path. See below for details. Here's what you
+need to do to use Elastic version 1:
+```sh
+$ go get gopkg.in/olivere/elastic.v1
+```
+Then change your import path:
+```go
+import "gopkg.in/olivere/elastic.v1"
+```
+## Status
+We have been using Elastic in production since 2012. Although Elastic is quite stable
+from our experience, we don't have a stable API yet. The reason for this
+is that Elasticsearch changes quite often and at a fast pace.
+At this moment we focus on features, not on a stable API.
+Having said that, there have been no big API changes that required you
+to rewrite your application big time.
+More often than not it's renaming APIs and adding/removing features
+so that we are in sync with the Elasticsearch API.
+Elastic supports and has been tested in production with
+the following Elasticsearch versions: 0.90, 1.0, 1.1, 1.2, 1.3, and 1.4.
+Elasticsearch has quite a few features. A lot of them are
+not yet implemented in Elastic (see below for details).
+I add features and APIs as required. It's straightforward
+to implement missing pieces. I'm accepting pull requests :-)
+Having said that, I hope you find the project useful.
+## Usage
+The first thing you do is to create a Client. The client connects to
+Elasticsearch on http://127.0.0.1:9200 by default.
+You typically create one client for your app. Here's a complete example.
+// Create a client
+client, err := elastic.NewClient()
+if err != nil {
+ // Handle error
+// Create an index
+_, err = client.CreateIndex("twitter").Do()
+ panic(err)
+// Add a document to the index
+tweet := Tweet{User: "olivere", Message: "Take Five"}
+_, err = client.Index().
+ Index("twitter").
+ Type("tweet").
+ Id("1").
+ BodyJson(tweet).
+ Do()
+// Search with a term query
+termQuery := elastic.NewTermQuery("user", "olivere")
+searchResult, err := client.Search().
+ Index("twitter"). // search in index "twitter"
+ Query(&termQuery). // specify the query
+ Sort("user", true). // sort by "user" field, ascending
+ From(0).Size(10). // take documents 0-9
+ Debug(true). // print request and response to stdout
+ Pretty(true). // pretty print request and response JSON
+ Do() // execute
+// searchResult is of type SearchResult and returns hits, suggestions,
+// and all kinds of other information from Elasticsearch.
+fmt.Printf("Query took %d milliseconds\n", searchResult.TookInMillis)
+// Each is a convenience function that iterates over hits in a search result.
+// It makes sure you don't need to check for nil values in the response.
+// However, it ignores errors in serialization. If you want full control
+// over iterating the hits, see below.
+var ttyp Tweet
+for _, item := range searchResult.Each(reflect.TypeOf(ttyp)) {
+ t := item.(Tweet)
+ fmt.Printf("Tweet by %s: %s\n", t.User, t.Message)
+// TotalHits is another convenience function that works even when something goes wrong.
+fmt.Printf("Found a total of %d tweets\n", searchResult.TotalHits())
+// Here's how you iterate through results with full control over each step.
+if searchResult.Hits != nil {
+ fmt.Printf("Found a total of %d tweets\n", searchResult.Hits.TotalHits)
+ // Iterate through results
+ for _, hit := range searchResult.Hits.Hits {
+ // hit.Index contains the name of the index
+ // Deserialize hit.Source into a Tweet (could also be just a map[string]interface{}).
+ var t Tweet
+ err := json.Unmarshal(*hit.Source, &t)
+ if err != nil {
+ // Deserialization failed
+ // Work with tweet
+} else {
+ // No hits
+ fmt.Print("Found no tweets\n")
+// Delete the index again
+_, err = client.DeleteIndex("twitter").Do()
+See the [wiki](https://github.com/olivere/elastic/wiki) for more details.
+## API Status
+Here's the current API status.
+### APIs
+- [x] Search (most queries, filters, facets, aggregations etc. are implemented: see below)
+- [x] Index
+- [x] Get
+- [x] Delete
+- [x] Delete By Query
+- [x] Update
+- [x] Multi Get
+- [x] Bulk
+- [ ] Bulk UDP
+- [ ] Term vectors
+- [ ] Multi term vectors
+- [x] Count
+- [ ] Validate
+- [x] Explain
+- [x] Search
+- [ ] Search shards
+- [x] Search template
+- [x] Facets (most are implemented, see below)
+- [x] Aggregates (most are implemented, see below)
+- [x] Multi Search
+- [x] Percolate
+- [ ] More like this
+- [ ] Benchmark
+### Indices
+- [x] Create index
+- [x] Delete index
+- [x] Get index
+- [x] Indices exists
+- [x] Open/close index
+- [x] Put mapping
+- [x] Get mapping
+- [ ] Get field mapping
+- [x] Types exist
+- [x] Delete mapping
+- [x] Index aliases
+- [ ] Update indices settings
+- [x] Get settings
+- [ ] Analyze
+- [x] Index templates
+- [ ] Warmers
+- [ ] Status
+- [x] Indices stats
+- [ ] Indices segments
+- [ ] Indices recovery
+- [ ] Clear cache
+- [x] Flush
+- [x] Refresh
+- [x] Optimize
+- [ ] Upgrade
+### Snapshot and Restore
+- [ ] Snapshot
+- [ ] Restore
+- [ ] Snapshot status
+- [ ] Monitoring snapshot/restore progress
+- [ ] Partial restore
+### Cat APIs
+Not implemented. Those are better suited for operating with Elasticsearch
+on the command line.
+### Cluster
+- [x] Health
+- [x] State
+- [ ] Stats
+- [ ] Pending cluster tasks
+- [ ] Cluster reroute
+- [ ] Cluster update settings
+- [ ] Nodes stats
+- [x] Nodes info
+- [ ] Nodes hot_threads
+- [ ] Nodes shutdown
+### Query DSL
+#### Queries
+- [x] `match`
+- [x] `multi_match`
+- [x] `bool`
+- [x] `boosting`
+- [ ] `common_terms`
+- [ ] `constant_score`
+- [x] `dis_max`
+- [x] `filtered`
+- [x] `fuzzy_like_this_query` (`flt`)
+- [x] `fuzzy_like_this_field_query` (`flt_field`)
+- [x] `function_score`
+- [x] `fuzzy`
+- [ ] `geo_shape`
+- [x] `has_child`
+- [x] `has_parent`
+- [x] `ids`
+- [ ] `indices`
+- [x] `match_all`
+- [x] `mlt`
+- [x] `mlt_field`
+- [x] `nested`
+- [x] `prefix`
+- [x] `query_string`
+- [x] `simple_query_string`
+- [x] `range`
+- [x] `regexp`
+- [ ] `span_first`
+- [ ] `span_multi_term`
+- [ ] `span_near`
+- [ ] `span_not`
+- [ ] `span_or`
+- [ ] `span_term`
+- [x] `term`
+- [x] `terms`
+- [ ] `top_children`
+- [x] `wildcard`
+- [ ] `minimum_should_match`
+- [ ] `multi_term_query_rewrite`
+- [x] `template_query`
+#### Filters
+- [x] `and`
+- [x] `exists`
+- [ ] `geo_bounding_box`
+- [x] `geo_distance`
+- [ ] `geo_distance_range`
+- [x] `geo_polygon`
+- [ ] `geoshape`
+- [ ] `geohash`
+- [x] `limit`
+- [x] `missing`
+- [x] `not`
+- [x] `or`
+- [x] `query`
+- [ ] `script`
+- [x] `type`
+### Facets
+- [x] Terms
+- [x] Range
+- [x] Histogram
+- [x] Date Histogram
+- [x] Filter
+- [x] Query
+- [x] Statistical
+- [x] Terms Stats
+- [x] Geo Distance
+### Aggregations
+- [x] min
+- [x] max
+- [x] sum
+- [x] avg
+- [x] stats
+- [x] extended stats
+- [x] value count
+- [x] percentiles
+- [x] percentile ranks
+- [x] cardinality
+- [x] geo bounds
+- [x] top hits
+- [ ] scripted metric
+- [x] global
+- [x] filter
+- [x] filters
+- [x] missing
+- [x] nested
+- [x] reverse nested
+- [x] children
+- [x] terms
+- [x] significant terms
+- [x] range
+- [x] date range
+- [x] ipv4 range
+- [x] histogram
+- [x] date histogram
+- [x] geo distance
+- [x] geohash grid
+### Sorting
+- [x] Sort by score
+- [x] Sort by field
+- [x] Sort by geo distance
+- [x] Sort by script
+### Scan
+Scrolling through documents (e.g. `search_type=scan`) is implemented via
+the `Scroll` and `Scan` services. The `ClearScroll` API is implemented as well.
+## How to contribute
+Read [the contribution guidelines](https://github.com/olivere/elastic/blob/master/CONTRIBUTING.md).
+## Credits
+Thanks a lot for the great folks working hard on
+[Elasticsearch](http://www.elasticsearch.org/)
+and
+[Go](http://www.golang.org/).
+## LICENSE
+MIT-LICENSE. See [LICENSE](http://olivere.mit-license.org/)
+or the LICENSE file provided in the repository for details.
@@ -0,0 +1,107 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+package elastic
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+)
+type AliasService struct {
+ client *Client
+ actions []aliasAction
+ pretty bool
+type aliasAction struct {
+ // "add" or "remove"
+ Type string
+ // Index name
+ Index string
+ // Alias name
+ Alias string
+ // Filter
+ Filter *Filter
+func NewAliasService(client *Client) *AliasService {
+ builder := &AliasService{
+ client: client,
+ actions: make([]aliasAction, 0),
+ return builder
+func (s *AliasService) Pretty(pretty bool) *AliasService {
+ s.pretty = pretty
+ return s
+func (s *AliasService) Add(indexName string, aliasName string) *AliasService {
+ action := aliasAction{Type: "add", Index: indexName, Alias: aliasName}
+ s.actions = append(s.actions, action)
+func (s *AliasService) AddWithFilter(indexName string, aliasName string, filter *Filter) *AliasService {
+ action := aliasAction{Type: "add", Index: indexName, Alias: aliasName, Filter: filter}
+func (s *AliasService) Remove(indexName string, aliasName string) *AliasService {
+ action := aliasAction{Type: "remove", Index: indexName, Alias: aliasName}
+func (s *AliasService) Do() (*AliasResult, error) {
+ // Build url
+ path := "/_aliases"
+ // Parameters
+ params := make(url.Values)
+ if s.pretty {
+ params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+ // Actions
+ body := make(map[string]interface{})
+ actionsJson := make([]interface{}, 0)
+ for _, action := range s.actions {
+ actionJson := make(map[string]interface{})
+ detailsJson := make(map[string]interface{})
+ detailsJson["index"] = action.Index
+ detailsJson["alias"] = action.Alias
+ if action.Filter != nil {
+ detailsJson["filter"] = (*action.Filter).Source()
+ actionJson[action.Type] = detailsJson
+ actionsJson = append(actionsJson, actionJson)
+ body["actions"] = actionsJson
+ // Get response
+ res, err := s.client.PerformRequest("POST", path, params, body)
+ return nil, err
+ // Return results
+ ret := new(AliasResult)
+ if err := json.Unmarshal(res.Body, ret); err != nil {
+ return ret, nil
+// -- Result of an alias request.
+type AliasResult struct {
+ Acknowledged bool `json:"acknowledged"`
@@ -0,0 +1,123 @@
+ "testing"
+const (
+ testAliasName = "elastic-test-alias"
+func TestAliasLifecycle(t *testing.T) {
+ var err error
+ client := setupTestClientAndCreateIndex(t)
+ // Some tweets
+ tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+ tweet2 := tweet{User: "sandrae", Message: "Cycling is fun."}
+ tweet3 := tweet{User: "olivere", Message: "Another unrelated topic."}
+ // Add tweets to first index
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+ t.Fatal(err)
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+ // Add tweets to second index
+ _, err = client.Index().Index(testIndexName2).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+ // Flush
+ _, err = client.Flush().Index(testIndexName).Do()
+ _, err = client.Flush().Index(testIndexName2).Do()
+ /*
+ // Alias should not yet exist
+ aliasesResult1, err := client.Aliases().Do()
+ if len(aliasesResult1.Indices) != 0 {
+ t.Errorf("expected len(AliasesResult.Indices) = %d; got %d", 0, len(aliasesResult1.Indices))
+ */
+ // Add both indices to a new alias
+ aliasCreate, err := client.Alias().
+ Add(testIndexName, testAliasName).
+ Add(testIndexName2, testAliasName).
+ //Pretty(true).
+ if !aliasCreate.Acknowledged {
+ t.Errorf("expected AliasResult.Acknowledged %v; got %v", true, aliasCreate.Acknowledged)
+ // Search should return all 3 tweets
+ matchAll := NewMatchAllQuery()
+ searchResult1, err := client.Search().Index(testAliasName).Query(&matchAll).Do()
+ if searchResult1.Hits == nil {
+ t.Errorf("expected SearchResult.Hits != nil; got nil")
+ if searchResult1.Hits.TotalHits != 3 {
+ t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult1.Hits.TotalHits)
+ // Alias should return both indices
+ aliasesResult2, err := client.Aliases().Do()
+ if len(aliasesResult2.Indices) != 2 {
+ t.Errorf("expected len(AliasesResult.Indices) = %d; got %d", 2, len(aliasesResult2.Indices))
+ // Remove first index should remove two tweets, so should only yield 1
+ aliasRemove1, err := client.Alias().
+ Remove(testIndexName, testAliasName).
+ if !aliasRemove1.Acknowledged {
+ t.Errorf("expected AliasResult.Acknowledged %v; got %v", true, aliasRemove1.Acknowledged)
+ searchResult2, err := client.Search().Index(testAliasName).Query(&matchAll).Do()
+ if searchResult2.Hits == nil {
+ if searchResult2.Hits.TotalHits != 1 {
+ t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 1, searchResult2.Hits.TotalHits)
@@ -0,0 +1,160 @@
+ "strings"
+ "github.com/olivere/elastic/uritemplates"
+type AliasesService struct {
+ indices []string
+func NewAliasesService(client *Client) *AliasesService {
+ builder := &AliasesService{
+ indices: make([]string, 0),
+func (s *AliasesService) Pretty(pretty bool) *AliasesService {
+func (s *AliasesService) Index(indexName string) *AliasesService {
+ s.indices = append(s.indices, indexName)
+func (s *AliasesService) Indices(indexNames ...string) *AliasesService {
+ s.indices = append(s.indices, indexNames...)
+func (s *AliasesService) Do() (*AliasesResult, error) {
+ path := "/"
+ // Indices part
+ indexPart := make([]string, 0)
+ for _, index := range s.indices {
+ index, err = uritemplates.Expand("{index}", map[string]string{
+ "index": index,
+ })
+ indexPart = append(indexPart, index)
+ path += strings.Join(indexPart, ",")
+ // TODO Add types here
+ // Search
+ path += "/_aliases"
+ res, err := s.client.PerformRequest("GET", path, params, nil)
+ // {
+ // "indexName" : {
+ // "aliases" : {
+ // "alias1" : { },
+ // "alias2" : { }
+ // }
+ // },
+ // "indexName2" : {
+ // ...
+ indexMap := make(map[string]interface{})
+ if err := json.Unmarshal(res.Body, &indexMap); err != nil {
+ // Each (indexName, _)
+ ret := &AliasesResult{
+ Indices: make(map[string]indexResult),
+ for indexName, indexData := range indexMap {
+ indexOut, found := ret.Indices[indexName]
+ if !found {
+ indexOut = indexResult{Aliases: make([]aliasResult, 0)}
+ // { "aliases" : { ... } }
+ indexDataMap, ok := indexData.(map[string]interface{})
+ if ok {
+ aliasesData, ok := indexDataMap["aliases"].(map[string]interface{})
+ for aliasName, _ := range aliasesData {
+ aliasRes := aliasResult{AliasName: aliasName}
+ indexOut.Aliases = append(indexOut.Aliases, aliasRes)
+ ret.Indices[indexName] = indexOut
+type AliasesResult struct {
+ Indices map[string]indexResult
+type indexResult struct {
+ Aliases []aliasResult
+type aliasResult struct {
+ AliasName string
+func (ar AliasesResult) IndicesByAlias(aliasName string) []string {
+ indices := make([]string, 0)
+ for indexName, indexInfo := range ar.Indices {
+ for _, aliasInfo := range indexInfo.Aliases {
+ if aliasInfo.AliasName == aliasName {
+ indices = append(indices, indexName)
+ return indices
+func (ir indexResult) HasAlias(aliasName string) bool {
+ for _, alias := range ir.Aliases {
+ if alias.AliasName == aliasName {
+ return true
+ return false
@@ -0,0 +1,146 @@
+func TestAliases(t *testing.T) {
+ aliasesResult1, err := client.Aliases().
+ Indices(testIndexName, testIndexName2).
+ if len(aliasesResult1.Indices) != 2 {
+ t.Errorf("expected len(AliasesResult.Indices) = %d; got %d", 2, len(aliasesResult1.Indices))
+ for indexName, indexDetails := range aliasesResult1.Indices {
+ if len(indexDetails.Aliases) != 0 {
+ t.Errorf("expected len(AliasesResult.Indices[%s].Aliases) = %d; got %d", indexName, 0, len(indexDetails.Aliases))
+ // Alias should now exist
+ aliasesResult2, err := client.Aliases().
+ for indexName, indexDetails := range aliasesResult2.Indices {
+ if len(indexDetails.Aliases) != 1 {
+ t.Errorf("expected len(AliasesResult.Indices[%s].Aliases) = %d; got %d", indexName, 1, len(indexDetails.Aliases))
+ // Check the reverse function:
+ indexInfo1, found := aliasesResult2.Indices[testIndexName]
+ t.Errorf("expected info about index %s = %v; got %v", testIndexName, true, found)
+ aliasFound := indexInfo1.HasAlias(testAliasName)
+ if !aliasFound {
+ t.Errorf("expected alias %s to include index %s; got %v", testAliasName, testIndexName, aliasFound)
+ indexInfo2, found := aliasesResult2.Indices[testIndexName2]
+ aliasFound = indexInfo2.HasAlias(testAliasName)
+ t.Errorf("expected alias %s to include index %s; got %v", testAliasName, testIndexName2, aliasFound)
+ // Alias should now exist only for index 2
+ aliasesResult3, err := client.Aliases().Indices(testIndexName, testIndexName2).Do()
+ if len(aliasesResult3.Indices) != 2 {
+ t.Errorf("expected len(AliasesResult.Indices) = %d; got %d", 2, len(aliasesResult3.Indices))
+ for indexName, indexDetails := range aliasesResult3.Indices {
+ if indexName == testIndexName {
+ } else if indexName == testIndexName2 {
+ } else {
+ t.Errorf("got index %s", indexName)
@@ -0,0 +1,301 @@
+ "bytes"
+ "errors"
+type BulkService struct {
+ index string
+ _type string
+ requests []BulkableRequest
+ //replicationType string
+ //consistencyLevel string
+ timeout string
+ refresh *bool
+func NewBulkService(client *Client) *BulkService {
+ builder := &BulkService{
+ requests: make([]BulkableRequest, 0),
+func (s *BulkService) reset() {
+ s.requests = make([]BulkableRequest, 0)
+func (s *BulkService) Index(index string) *BulkService {
+ s.index = index
+func (s *BulkService) Type(_type string) *BulkService {
+ s._type = _type
+func (s *BulkService) Timeout(timeout string) *BulkService {
+ s.timeout = timeout
+func (s *BulkService) Refresh(refresh bool) *BulkService {
+ s.refresh = &refresh
+func (s *BulkService) Pretty(pretty bool) *BulkService {
+func (s *BulkService) Add(r BulkableRequest) *BulkService {
+ s.requests = append(s.requests, r)
+func (s *BulkService) NumberOfActions() int {
+ return len(s.requests)
+func (s *BulkService) bodyAsString() (string, error) {
+ buf := bytes.NewBufferString("")
+ for _, req := range s.requests {
+ source, err := req.Source()
+ return "", err
+ for _, line := range source {
+ _, err := buf.WriteString(fmt.Sprintf("%s\n", line))
+ return "", nil
+ return buf.String(), nil
+func (s *BulkService) Do() (*BulkResponse, error) {
+ // No actions?
+ if s.NumberOfActions() == 0 {
+ return nil, errors.New("elastic: No bulk actions to commit")
+ // Get body
+ body, err := s.bodyAsString()
+ if s.index != "" {
+ index, err := uritemplates.Expand("{index}", map[string]string{
+ "index": s.index,
+ path += index + "/"
+ if s._type != "" {
+ typ, err := uritemplates.Expand("{type}", map[string]string{
+ "type": s._type,
+ path += typ + "/"
+ path += "_bulk"
+ if s.refresh != nil {
+ params.Set("refresh", fmt.Sprintf("%v", *s.refresh))
+ if s.timeout != "" {
+ params.Set("timeout", s.timeout)
+ ret := new(BulkResponse)
+ // Reset so the request can be reused
+ s.reset()
+// BulkResponse is a response to a bulk execution.
+//
+// Example:
+// {
+// "took":3,
+// "errors":false,
+// "items":[{
+// "index":{
+// "_index":"index1",
+// "_type":"tweet",
+// "_id":"1",
+// "_version":3,
+// "status":201
+// }
+// },{
+// "_index":"index2",
+// "_id":"2",
+// "status":200
+// "delete":{
+// "_version":4,
+// "status":200,
+// "found":true
+// "update":{
+// }]
+type BulkResponse struct {
+ Took int `json:"took,omitempty"`
+ Errors bool `json:"errors,omitempty"`
+ Items []map[string]*BulkResponseItem `json:"items,omitempty"`
+// BulkResponseItem is the result of a single bulk request.
+type BulkResponseItem struct {
+ Index string `json:"_index,omitempty"`
+ Type string `json:"_type,omitempty"`
+ Id string `json:"_id,omitempty"`
+ Version int `json:"_version,omitempty"`
+ Status int `json:"status,omitempty"`
+ Found bool `json:"found,omitempty"`
+ Error string `json:"error,omitempty"`
+// Indexed returns all bulk request results of "index" actions.
+func (r *BulkResponse) Indexed() []*BulkResponseItem {
+ return r.ByAction("index")
+// Created returns all bulk request results of "create" actions.
+func (r *BulkResponse) Created() []*BulkResponseItem {
+ return r.ByAction("create")
+// Updated returns all bulk request results of "update" actions.
+func (r *BulkResponse) Updated() []*BulkResponseItem {
+ return r.ByAction("update")
+// Deleted returns all bulk request results of "delete" actions.
+func (r *BulkResponse) Deleted() []*BulkResponseItem {
+ return r.ByAction("delete")
+// ByAction returns all bulk request results of a certain action,
+// e.g. "index" or "delete".
+func (r *BulkResponse) ByAction(action string) []*BulkResponseItem {
+ if r.Items == nil {
+ return nil
+ items := make([]*BulkResponseItem, 0)
+ for _, item := range r.Items {
+ if result, found := item[action]; found {
+ items = append(items, result)
+ return items
+// ById returns all bulk request results of a given document id,
+// regardless of the action ("index", "delete" etc.).
+func (r *BulkResponse) ById(id string) []*BulkResponseItem {
+ for _, result := range item {
+ if result.Id == id {
+// Failed returns those items of a bulk response that have errors,
+// i.e. those that don't have a status code between 200 and 299.
+func (r *BulkResponse) Failed() []*BulkResponseItem {
+ errors := make([]*BulkResponseItem, 0)
+ if !(result.Status >= 200 && result.Status <= 299) {
+ errors = append(errors, result)
+ return errors
+// Succeeded returns those items of a bulk response that have no errors,
+// i.e. those have a status code between 200 and 299.
+func (r *BulkResponse) Succeeded() []*BulkResponseItem {
+ succeeded := make([]*BulkResponseItem, 0)
+ if result.Status >= 200 && result.Status <= 299 {
+ succeeded = append(succeeded, result)
+ return succeeded
@@ -0,0 +1,112 @@
+// -- Bulk delete request --
+// Bulk request to remove document from Elasticsearch.
+type BulkDeleteRequest struct {
+ BulkableRequest
+ typ string
+ id string
+ routing string
+ version int64 // default is MATCH_ANY
+ versionType string // default is "internal"
+func NewBulkDeleteRequest() *BulkDeleteRequest {
+ return &BulkDeleteRequest{}
+func (r *BulkDeleteRequest) Index(index string) *BulkDeleteRequest {
+ r.index = index
+ return r
+func (r *BulkDeleteRequest) Type(typ string) *BulkDeleteRequest {
+ r.typ = typ
+func (r *BulkDeleteRequest) Id(id string) *BulkDeleteRequest {
+ r.id = id
+func (r *BulkDeleteRequest) Routing(routing string) *BulkDeleteRequest {
+ r.routing = routing
+func (r *BulkDeleteRequest) Refresh(refresh bool) *BulkDeleteRequest {
+ r.refresh = &refresh
+func (r *BulkDeleteRequest) Version(version int64) *BulkDeleteRequest {
+ r.version = version
+// VersionType can be "internal" (default), "external", "external_gte",
+// "external_gt", or "force".
+func (r *BulkDeleteRequest) VersionType(versionType string) *BulkDeleteRequest {
+ r.versionType = versionType
+func (r *BulkDeleteRequest) String() string {
+ lines, err := r.Source()
+ if err == nil {
+ return strings.Join(lines, "\n")
+ return fmt.Sprintf("error: %v", err)
+func (r *BulkDeleteRequest) Source() ([]string, error) {
+ lines := make([]string, 1)
+ source := make(map[string]interface{})
+ deleteCommand := make(map[string]interface{})
+ if r.index != "" {
+ deleteCommand["_index"] = r.index
+ if r.typ != "" {
+ deleteCommand["_type"] = r.typ
+ if r.id != "" {
+ deleteCommand["_id"] = r.id
+ if r.routing != "" {
+ deleteCommand["_routing"] = r.routing
+ if r.version > 0 {
+ deleteCommand["_version"] = r.version
+ if r.versionType != "" {
+ deleteCommand["_version_type"] = r.versionType
+ if r.refresh != nil {
+ deleteCommand["refresh"] = *r.refresh
+ source["delete"] = deleteCommand
+ body, err := json.Marshal(source)
+ lines[0] = string(body)
+ return lines, nil
@@ -0,0 +1,42 @@
+func TestBulkDeleteRequestSerialization(t *testing.T) {
+ tests := []struct {
+ Request BulkableRequest
+ Expected []string
+ }{
+ // #0
+ Request: NewBulkDeleteRequest().Index("index1").Type("tweet").Id("1"),
+ Expected: []string{
+ `{"delete":{"_id":"1","_index":"index1","_type":"tweet"}}`,
+ for i, test := range tests {
+ lines, err := test.Request.Source()
+ t.Fatalf("case #%d: expected no error, got: %v", i, err)
+ if lines == nil {
+ t.Fatalf("case #%d: expected lines, got nil", i)
+ if len(lines) != len(test.Expected) {
+ t.Fatalf("case #%d: expected %d lines, got %d", i, len(test.Expected), len(lines))
+ for j, line := range lines {
+ if line != test.Expected[j] {
+ t.Errorf("case #%d: expected line #%d to be %s, got: %s", i, j, test.Expected[j], line)
@@ -0,0 +1,173 @@
+// Bulk request to add document to Elasticsearch.
+type BulkIndexRequest struct {
+ opType string
+ parent string
+ timestamp string
+ ttl int64
+ doc interface{}
+func NewBulkIndexRequest() *BulkIndexRequest {
+ return &BulkIndexRequest{
+ opType: "index",
+func (r *BulkIndexRequest) Index(index string) *BulkIndexRequest {
+func (r *BulkIndexRequest) Type(typ string) *BulkIndexRequest {
+func (r *BulkIndexRequest) Id(id string) *BulkIndexRequest {
+func (r *BulkIndexRequest) OpType(opType string) *BulkIndexRequest {
+ r.opType = opType
+func (r *BulkIndexRequest) Routing(routing string) *BulkIndexRequest {
+func (r *BulkIndexRequest) Parent(parent string) *BulkIndexRequest {
+ r.parent = parent
+func (r *BulkIndexRequest) Timestamp(timestamp string) *BulkIndexRequest {
+ r.timestamp = timestamp
+func (r *BulkIndexRequest) Ttl(ttl int64) *BulkIndexRequest {
+ r.ttl = ttl
+func (r *BulkIndexRequest) Refresh(refresh bool) *BulkIndexRequest {
+func (r *BulkIndexRequest) Version(version int64) *BulkIndexRequest {
+func (r *BulkIndexRequest) VersionType(versionType string) *BulkIndexRequest {
+func (r *BulkIndexRequest) Doc(doc interface{}) *BulkIndexRequest {
+ r.doc = doc
+func (r *BulkIndexRequest) String() string {
+func (r *BulkIndexRequest) Source() ([]string, error) {
+ // { "index" : { "_index" : "test", "_type" : "type1", "_id" : "1" } }
+ // { "field1" : "value1" }
+ lines := make([]string, 2)
+ // "index" ...
+ command := make(map[string]interface{})
+ indexCommand := make(map[string]interface{})
+ indexCommand["_index"] = r.index
+ indexCommand["_type"] = r.typ
+ indexCommand["_id"] = r.id
+ indexCommand["_routing"] = r.routing
+ if r.parent != "" {
+ indexCommand["_parent"] = r.parent
+ if r.timestamp != "" {
+ indexCommand["_timestamp"] = r.timestamp
+ if r.ttl > 0 {
+ indexCommand["_ttl"] = r.ttl
+ indexCommand["_version"] = r.version
+ indexCommand["_version_type"] = r.versionType
+ indexCommand["refresh"] = *r.refresh
+ command[r.opType] = indexCommand
+ line, err := json.Marshal(command)
+ lines[0] = string(line)
+ // "field1" ...
+ if r.doc != nil {
+ switch t := r.doc.(type) {
+ default:
+ body, err := json.Marshal(r.doc)
+ lines[1] = string(body)
+ case json.RawMessage:
+ lines[1] = string(t)
+ case *json.RawMessage:
+ lines[1] = string(*t)
+ case string:
+ lines[1] = t
+ case *string:
+ lines[1] = *t
+ lines[1] = "{}"
@@ -0,0 +1,63 @@
+ "time"
+func TestBulkIndexRequestSerialization(t *testing.T) {
+ Request: NewBulkIndexRequest().Index("index1").Type("tweet").Id("1").
+ Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)}),
+ `{"index":{"_id":"1","_index":"index1","_type":"tweet"}}`,
+ `{"user":"olivere","message":"","retweets":0,"created":"2014-01-18T23:59:58Z"}`,
+ // #1
+ Request: NewBulkIndexRequest().OpType("create").Index("index1").Type("tweet").Id("1").
+ `{"create":{"_id":"1","_index":"index1","_type":"tweet"}}`,
+ // #2
+ Request: NewBulkIndexRequest().OpType("index").Index("index1").Type("tweet").Id("1").
@@ -0,0 +1,17 @@
+// -- Bulkable request (index/update/delete) --
+// Generic interface to bulkable requests.
+type BulkableRequest interface {
+ fmt.Stringer
+ Source() ([]string, error)
@@ -0,0 +1,370 @@
+func TestBulk(t *testing.T) {
+ tweet2 := tweet{User: "sandrae", Message: "Dancing all night long. Yeah."}
+ index1Req := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id("1").Doc(tweet1)
+ index2Req := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id("2").Doc(tweet2)
+ delete1Req := NewBulkDeleteRequest().Index(testIndexName).Type("tweet").Id("1")
+ bulkRequest := client.Bulk()
+ bulkRequest = bulkRequest.Add(index1Req)
+ bulkRequest = bulkRequest.Add(index2Req)
+ bulkRequest = bulkRequest.Add(delete1Req)
+ if bulkRequest.NumberOfActions() != 3 {
+ t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 3, bulkRequest.NumberOfActions())
+ bulkResponse, err := bulkRequest.Do()
+ if bulkResponse == nil {
+ t.Errorf("expected bulkResponse to be != nil; got nil")
+ if bulkRequest.NumberOfActions() != 0 {
+ t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 0, bulkRequest.NumberOfActions())
+ // Document with Id="1" should not exist
+ exists, err := client.Exists().Index(testIndexName).Type("tweet").Id("1").Do()
+ if exists {
+ t.Errorf("expected exists %v; got %v", false, exists)
+ // Document with Id="2" should exist
+ exists, err = client.Exists().Index(testIndexName).Type("tweet").Id("2").Do()
+ if !exists {
+ t.Errorf("expected exists %v; got %v", true, exists)
+ // Update
+ updateDoc := struct {
+ Retweets int `json:"retweets"`
+ 42,
+ update1Req := NewBulkUpdateRequest().Index(testIndexName).Type("tweet").Id("2").Doc(&updateDoc)
+ bulkRequest = client.Bulk()
+ bulkRequest = bulkRequest.Add(update1Req)
+ if bulkRequest.NumberOfActions() != 1 {
+ t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 1, bulkRequest.NumberOfActions())
+ bulkResponse, err = bulkRequest.Do()
+ // Document with Id="2" should have a retweets count of 42
+ doc, err := client.Get().Index(testIndexName).Type("tweet").Id("2").Do()
+ if doc == nil {
+ t.Fatal("expected doc to be != nil; got nil")
+ if !doc.Found {
+ t.Fatalf("expected doc to be found; got found = %v", doc.Found)
+ if doc.Source == nil {
+ t.Fatal("expected doc source to be != nil; got nil")
+ var updatedTweet tweet
+ err = json.Unmarshal(*doc.Source, &updatedTweet)
+ if updatedTweet.Retweets != 42 {
+ t.Errorf("expected updated tweet retweets = %v; got %v", 42, updatedTweet.Retweets)
+func TestBulkWithIndexSetOnClient(t *testing.T) {
+ bulkRequest := client.Bulk().Index(testIndexName).Type("tweet")
+func TestBulkRequestsSerialization(t *testing.T) {
+ index2Req := NewBulkIndexRequest().OpType("create").Index(testIndexName).Type("tweet").Id("2").Doc(tweet2)
+ update2Req := NewBulkUpdateRequest().Index(testIndexName).Type("tweet").Id("2").
+ Doc(struct {
+ Retweets: 42,
+ bulkRequest = bulkRequest.Add(update2Req)
+ if bulkRequest.NumberOfActions() != 4 {
+ t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 4, bulkRequest.NumberOfActions())
+ expected := `{"index":{"_id":"1","_index":"` + testIndexName + `","_type":"tweet"}}
+{"user":"olivere","message":"Welcome to Golang and Elasticsearch.","retweets":0,"created":"0001-01-01T00:00:00Z"}
+{"create":{"_id":"2","_index":"` + testIndexName + `","_type":"tweet"}}
+{"user":"sandrae","message":"Dancing all night long. Yeah.","retweets":0,"created":"0001-01-01T00:00:00Z"}
+{"delete":{"_id":"1","_index":"` + testIndexName + `","_type":"tweet"}}
+{"update":{"_id":"2","_index":"` + testIndexName + `","_type":"tweet"}}
+{"doc":{"retweets":42}}
+`
+ got, err := bulkRequest.bodyAsString()
+ t.Fatalf("expected no error, got: %v", err)
+ if got != expected {
+ t.Errorf("expected\n%s\ngot:\n%s", expected, got)
+ // Run the bulk request
+ if bulkResponse.Took == 0 {
+ t.Errorf("expected took to be > 0; got %d", bulkResponse.Took)
+ if bulkResponse.Errors {
+ t.Errorf("expected errors to be %v; got %v", false, bulkResponse.Errors)
+ if len(bulkResponse.Items) != 4 {
+ t.Fatalf("expected 4 result items; got %d", len(bulkResponse.Items))
+ // Indexed actions
+ indexed := bulkResponse.Indexed()
+ if indexed == nil {
+ t.Fatal("expected indexed to be != nil; got nil")
+ if len(indexed) != 1 {
+ t.Fatalf("expected len(indexed) == %d; got %d", 1, len(indexed))
+ if indexed[0].Id != "1" {
+ t.Errorf("expected indexed[0].Id == %s; got %s", "1", indexed[0].Id)
+ if indexed[0].Status != 201 {
+ t.Errorf("expected indexed[0].Status == %d; got %d", 201, indexed[0].Status)
+ // Created actions
+ created := bulkResponse.Created()
+ if created == nil {
+ t.Fatal("expected created to be != nil; got nil")
+ if len(created) != 1 {
+ t.Fatalf("expected len(created) == %d; got %d", 1, len(created))
+ if created[0].Id != "2" {
+ t.Errorf("expected created[0].Id == %s; got %s", "2", created[0].Id)
+ if created[0].Status != 201 {
+ t.Errorf("expected created[0].Status == %d; got %d", 201, created[0].Status)
+ // Deleted actions
+ deleted := bulkResponse.Deleted()
+ if deleted == nil {
+ t.Fatal("expected deleted to be != nil; got nil")
+ if len(deleted) != 1 {
+ t.Fatalf("expected len(deleted) == %d; got %d", 1, len(deleted))
+ if deleted[0].Id != "1" {
+ t.Errorf("expected deleted[0].Id == %s; got %s", "1", deleted[0].Id)
+ if deleted[0].Status != 200 {
+ t.Errorf("expected deleted[0].Status == %d; got %d", 200, deleted[0].Status)
+ if !deleted[0].Found {
+ t.Errorf("expected deleted[0].Found == %v; got %v", true, deleted[0].Found)
+ // Updated actions
+ updated := bulkResponse.Updated()
+ if updated == nil {
+ t.Fatal("expected updated to be != nil; got nil")
+ if len(updated) != 1 {
+ t.Fatalf("expected len(updated) == %d; got %d", 1, len(updated))
+ if updated[0].Id != "2" {
+ t.Errorf("expected updated[0].Id == %s; got %s", "2", updated[0].Id)
+ if updated[0].Status != 200 {
+ t.Errorf("expected updated[0].Status == %d; got %d", 200, updated[0].Status)
+ if updated[0].Version != 2 {
+ t.Errorf("expected updated[0].Version == %d; got %d", 2, updated[0].Version)
+ // Succeeded actions
+ succeeded := bulkResponse.Succeeded()
+ if succeeded == nil {
+ t.Fatal("expected succeeded to be != nil; got nil")
+ if len(succeeded) != 4 {
+ t.Fatalf("expected len(succeeded) == %d; got %d", 4, len(succeeded))
+ // ById
+ id1Results := bulkResponse.ById("1")
+ if id1Results == nil {
+ t.Fatal("expected id1Results to be != nil; got nil")
+ if len(id1Results) != 2 {
+ t.Fatalf("expected len(id1Results) == %d; got %d", 2, len(id1Results))
+ if id1Results[0].Id != "1" {
+ t.Errorf("expected id1Results[0].Id == %s; got %s", "1", id1Results[0].Id)
+ if id1Results[0].Status != 201 {
+ t.Errorf("expected id1Results[0].Status == %d; got %d", 201, id1Results[0].Status)
+ if id1Results[0].Version != 1 {
+ t.Errorf("expected id1Results[0].Version == %d; got %d", 1, id1Results[0].Version)
+ if id1Results[1].Id != "1" {
+ t.Errorf("expected id1Results[1].Id == %s; got %s", "1", id1Results[1].Id)
+ if id1Results[1].Status != 200 {
+ t.Errorf("expected id1Results[1].Status == %d; got %d", 200, id1Results[1].Status)
+ if id1Results[1].Version != 2 {
+ t.Errorf("expected id1Results[1].Version == %d; got %d", 2, id1Results[1].Version)
+func TestFailedBulkRequests(t *testing.T) {
+ js := `{
+ "took" : 2,
+ "errors" : true,
+ "items" : [ {
+ "index" : {
+ "_index" : "elastic-test",
+ "_type" : "tweet",
+ "_id" : "1",
+ "_version" : 1,
+ "status" : 201
+ }, {
+ "create" : {
+ "_id" : "2",
+ "status" : 423,
+ "error" : "Locked"
+ "delete" : {
+ "_version" : 2,
+ "status" : 404,
+ "found" : false
+ "update" : {
+ "status" : 200
+ } ]
+}`
+ var resp BulkResponse
+ err := json.Unmarshal([]byte(js), &resp)
+ failed := resp.Failed()
+ if len(failed) != 2 {
+ t.Errorf("expected %d failed items; got: %d", 2, len(failed))
@@ -0,0 +1,244 @@
+// Bulk request to update document in Elasticsearch.
+type BulkUpdateRequest struct {
+ script string
+ scriptType string
+ scriptLang string
+ scriptParams map[string]interface{}
+ retryOnConflict *int
+ upsert interface{}
+ docAsUpsert *bool
+func NewBulkUpdateRequest() *BulkUpdateRequest {
+ return &BulkUpdateRequest{}
+func (r *BulkUpdateRequest) Index(index string) *BulkUpdateRequest {
+func (r *BulkUpdateRequest) Type(typ string) *BulkUpdateRequest {
+func (r *BulkUpdateRequest) Id(id string) *BulkUpdateRequest {
+func (r *BulkUpdateRequest) Routing(routing string) *BulkUpdateRequest {
+func (r *BulkUpdateRequest) Parent(parent string) *BulkUpdateRequest {
+func (r *BulkUpdateRequest) Script(script string) *BulkUpdateRequest {
+ r.script = script
+func (r *BulkUpdateRequest) ScriptType(scriptType string) *BulkUpdateRequest {
+ r.scriptType = scriptType
+func (r *BulkUpdateRequest) ScriptLang(scriptLang string) *BulkUpdateRequest {
+ r.scriptLang = scriptLang
+func (r *BulkUpdateRequest) ScriptParams(params map[string]interface{}) *BulkUpdateRequest {
+ r.scriptParams = params
+func (r *BulkUpdateRequest) RetryOnConflict(retryOnConflict int) *BulkUpdateRequest {
+ r.retryOnConflict = &retryOnConflict
+func (r *BulkUpdateRequest) Version(version int64) *BulkUpdateRequest {
+func (r *BulkUpdateRequest) VersionType(versionType string) *BulkUpdateRequest {
+func (r *BulkUpdateRequest) Refresh(refresh bool) *BulkUpdateRequest {
+func (r *BulkUpdateRequest) Doc(doc interface{}) *BulkUpdateRequest {
+func (r *BulkUpdateRequest) DocAsUpsert(docAsUpsert bool) *BulkUpdateRequest {
+ r.docAsUpsert = &docAsUpsert
+func (r *BulkUpdateRequest) Upsert(doc interface{}) *BulkUpdateRequest {
+ r.upsert = doc
+func (r *BulkUpdateRequest) Ttl(ttl int64) *BulkUpdateRequest {
+func (r *BulkUpdateRequest) Timestamp(timestamp string) *BulkUpdateRequest {
+func (r *BulkUpdateRequest) String() string {
+func (r *BulkUpdateRequest) getSourceAsString(data interface{}) (string, error) {
+ switch t := data.(type) {
+ body, err := json.Marshal(data)
+ return string(body), nil
+ return string(t), nil
+ return string(*t), nil
+ return t, nil
+ return *t, nil
+func (r BulkUpdateRequest) Source() ([]string, error) {
+ // { "update" : { "_index" : "test", "_type" : "type1", "_id" : "1", ... } }
+ // { "doc" : { "field1" : "value1", ... } }
+ // or
+ // { "script" : { ... } }
+ // "update" ...
+ updateCommand := make(map[string]interface{})
+ updateCommand["_index"] = r.index
+ updateCommand["_type"] = r.typ
+ updateCommand["_id"] = r.id
+ updateCommand["_routing"] = r.routing
+ updateCommand["_parent"] = r.parent
+ updateCommand["_timestamp"] = r.timestamp
+ updateCommand["_ttl"] = r.ttl
+ updateCommand["_version"] = r.version
+ updateCommand["_version_type"] = r.versionType
+ updateCommand["refresh"] = *r.refresh
+ if r.retryOnConflict != nil {
+ updateCommand["_retry_on_conflict"] = *r.retryOnConflict
+ if r.upsert != nil {
+ updateCommand["upsert"] = r.upsert
+ command["update"] = updateCommand
+ // 2nd line: {"doc" : { ... }} or {"script": {...}}
+ if r.docAsUpsert != nil {
+ source["doc_as_upsert"] = *r.docAsUpsert
+ // {"doc":{...}}
+ source["doc"] = r.doc
+ } else if r.script != "" {
+ // {"script":...}
+ source["script"] = r.script
+ if r.scriptLang != "" {
+ source["lang"] = r.scriptLang
+ if r.scriptType != "" {
+ source["script_type"] = r.scriptType
+ if r.scriptParams != nil && len(r.scriptParams) > 0 {
+ source["params"] = r.scriptParams
+ lines[1], err = r.getSourceAsString(source)
@@ -0,0 +1,79 @@
+func TestBulkUpdateRequestSerialization(t *testing.T) {
+ Request: NewBulkUpdateRequest().Index("index1").Type("tweet").Id("1").Doc(struct {
+ Counter int64 `json:"counter"`
+ Counter: 42,
+ }),
+ `{"update":{"_id":"1","_index":"index1","_type":"tweet"}}`,
+ `{"doc":{"counter":42}}`,
+ Request: NewBulkUpdateRequest().Index("index1").Type("tweet").Id("1").
+ RetryOnConflict(3).
+ DocAsUpsert(true).
+ `{"update":{"_id":"1","_index":"index1","_retry_on_conflict":3,"_type":"tweet"}}`,
+ `{"doc":{"counter":42},"doc_as_upsert":true}`,
+ Script(`ctx._source.retweets += param1`).
+ ScriptLang("js").
+ ScriptParams(map[string]interface{}{"param1": 42}).
+ Upsert(struct {
+ `{"update":{"_id":"1","_index":"index1","_retry_on_conflict":3,"_type":"tweet","upsert":{"counter":42}}}`,
+ `{"lang":"js","params":{"param1":42},"script":"ctx._source.retweets += param1"}`,
@@ -0,0 +1,28 @@
+import "net/url"
+// canonicalize takes a list of URLs and returns their canonicalized form, i.e.
+// remove anything but scheme, userinfo, host, and port. It also removes the
+// slash at the end. It also skips invalid URLs or URLs that do not use
+// protocol http or https.
+// http://127.0.0.1:9200/path?query=1 -> http://127.0.0.1:9200
+func canonicalize(rawurls ...string) []string {
+ canonicalized := make([]string, 0)
+ for _, rawurl := range rawurls {
+ u, err := url.Parse(rawurl)
+ if err == nil && (u.Scheme == "http" || u.Scheme == "https") {
+ u.Fragment = ""
+ u.Path = ""
+ u.RawQuery = ""
+ canonicalized = append(canonicalized, u.String())
+ return canonicalized
@@ -0,0 +1,41 @@
+ "reflect"
+func TestCanonicalize(t *testing.T) {
+ Input []string
+ Output []string
+ Input: []string{"http://127.0.0.1/"},
+ Output: []string{"http://127.0.0.1"},
+ Input: []string{"http://127.0.0.1:9200/", "gopher://golang.org/", "http://127.0.0.1:9201"},
+ Output: []string{"http://127.0.0.1:9200", "http://127.0.0.1:9201"},
+ Input: []string{"http://user:secret@127.0.0.1/path?query=1#fragment"},
+ Output: []string{"http://user:secret@127.0.0.1"},
+ Input: []string{"https://somewhere.on.mars:9999/path?query=1#fragment"},
+ Output: []string{"https://somewhere.on.mars:9999"},
+ for _, test := range tests {
+ got := canonicalize(test.Input...)
+ if !reflect.DeepEqual(got, test.Output) {
+ t.Errorf("expected %v; got: %v", test.Output, got)
@@ -0,0 +1,96 @@
+ "log"
+var (
+ _ = fmt.Print
+ _ = log.Print
+ _ = strings.Index
+ _ = uritemplates.Expand
+ _ = url.Parse
+// ClearScrollService is documented at http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/search-request-scroll.html.
+type ClearScrollService struct {
+ scrollId []string
+ bodyJson interface{}
+ bodyString string
+// NewClearScrollService creates a new ClearScrollService.
+func NewClearScrollService(client *Client) *ClearScrollService {
+ return &ClearScrollService{
+ scrollId: make([]string, 0),
+// ScrollId is a list of scroll IDs to clear.
+// Use _all to clear all search contexts.
+func (s *ClearScrollService) ScrollId(scrollId ...string) *ClearScrollService {
+ s.scrollId = make([]string, 0)
+ s.scrollId = append(s.scrollId, scrollId...)
+// buildURL builds the URL for the operation.
+func (s *ClearScrollService) buildURL() (string, url.Values, error) {
+ path, err := uritemplates.Expand("/_search/scroll", map[string]string{})
+ return "", url.Values{}, err
+ return path, url.Values{}, nil
+// Validate checks if the operation is valid.
+func (s *ClearScrollService) Validate() error {
+// Do executes the operation.
+func (s *ClearScrollService) Do() (*ClearScrollResponse, error) {
+ // Check pre-conditions
+ if err := s.Validate(); err != nil {
+ // Get URL for request
+ path, params, err := s.buildURL()
+ // Setup HTTP request body
+ body := strings.Join(s.scrollId, ",")
+ // Get HTTP response
+ res, err := s.client.PerformRequest("DELETE", path, params, body)
+ // Return operation response
+ ret := new(ClearScrollResponse)
+// ClearScrollResponse is the response of ClearScrollService.Do.
+type ClearScrollResponse struct {
@@ -0,0 +1,72 @@
+ _ "net/http"
+func TestClearScroll(t *testing.T) {
+ tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+ tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
+ // Add all documents
+ _, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+ _, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+ // Match all should return all documents
+ res, err := client.Scroll(testIndexName).Size(1).Do()
+ if res == nil {
+ t.Errorf("expected results != nil; got nil")
+ if res.ScrollId == "" {
+ t.Errorf("expected scrollId in results; got %q", res.ScrollId)
+ // Search should succeed
+ _, err = client.Scroll(testIndexName).Size(1).ScrollId(res.ScrollId).Do()
+ // Clear scroll id
+ clearScrollRes, err := client.ClearScroll().ScrollId(res.ScrollId).Do()
+ if clearScrollRes == nil {
+ t.Error("expected results != nil; got nil")
+ // Search result should fail
+ t.Fatalf("expected scroll to fail")
@@ -0,0 +1,1240 @@
+ "math/rand"
+ "net/http"
+ "net/http/httputil"
+ "regexp"
+ "sync"
+ // Version is the current version of Elastic.
+ Version = "2.0.0"
+ // DefaultURL is the default endpoint of Elasticsearch on the local machine.
+ // It is used e.g. when initializing a new Client without a specific URL.
+ DefaultURL = "http://127.0.0.1:9200"
+ // DefaultScheme is the default protocol scheme to use when sniffing
+ // the Elasticsearch cluster.
+ DefaultScheme = "http"
+ // DefaultHealthcheckEnabled specifies if healthchecks are enabled by default.
+ DefaultHealthcheckEnabled = true
+ // DefaultHealthcheckTimeoutStartup is the time the healthcheck waits
+ // for a response from Elasticsearch on startup, i.e. when creating a
+ // client. After the client is started, a shorter timeout is commonly used
+ // (its default is specified in DefaultHealthcheckTimeout).
+ DefaultHealthcheckTimeoutStartup = 5 * time.Second
+ // DefaultHealthcheckTimeout specifies the time a running client waits for
+ // a response from Elasticsearch. Notice that the healthcheck timeout
+ // when a client is created is larger by default (see DefaultHealthcheckTimeoutStartup).
+ DefaultHealthcheckTimeout = 1 * time.Second
+ // DefaultHealthcheckInterval is the default interval between
+ // two health checks of the nodes in the cluster.
+ DefaultHealthcheckInterval = 60 * time.Second
+ // DefaultSnifferEnabled specifies if the sniffer is enabled by default.
+ DefaultSnifferEnabled = true
+ // DefaultSnifferInterval is the interval between two sniffing procedures,
+ // i.e. the lookup of all nodes in the cluster and their addition/removal
+ // from the list of actual connections.
+ DefaultSnifferInterval = 15 * time.Minute
+ // DefaultSnifferTimeoutStartup is the default timeout for the sniffing
+ // process that is initiated while creating a new client. For subsequent
+ // sniffing processes, DefaultSnifferTimeout is used (by default).
+ DefaultSnifferTimeoutStartup = 5 * time.Second
+ // DefaultSnifferTimeout is the default timeout after which the
+ // sniffing process times out. Notice that for the initial sniffing
+ // process, DefaultSnifferTimeoutStartup is used.
+ DefaultSnifferTimeout = 2 * time.Second
+ // DefaultMaxRetries is the number of retries for a single request after
+ // which Elastic will give up and return an error. It is zero by default, so
+ // retry is disabled by default.
+ DefaultMaxRetries = 0
+ // ErrNoClient is raised when no Elasticsearch node is available.
+ ErrNoClient = errors.New("no Elasticsearch node available")
+ // ErrRetry is raised when a request cannot be executed after the configured
+ // number of retries.
+ ErrRetry = errors.New("cannot connect after several retries")
+// ClientOptionFunc is a function that configures a Client.
+// It is used in NewClient.
+type ClientOptionFunc func(*Client) error
+// Client is an Elasticsearch client. Create one by calling NewClient.
+type Client struct {
+ c *http.Client // net/http Client to use for requests
+ connsMu sync.RWMutex // connsMu guards the next block
+ conns []*conn // all connections
+ cindex int // index into conns
+ mu sync.RWMutex // guards the next block
+ urls []string // set of URLs passed initially to the client
+ running bool // true if the client's background processes are running
+ errorlog *log.Logger // error log for critical messages
+ infolog *log.Logger // information log for e.g. response times
+ tracelog *log.Logger // trace log for debugging
+ maxRetries int // max. number of retries
+ scheme string // http or https
+ healthcheckEnabled bool // healthchecks enabled or disabled
+ healthcheckTimeoutStartup time.Duration // time the healthcheck waits for a response from Elasticsearch on startup
+ healthcheckTimeout time.Duration // time the healthcheck waits for a response from Elasticsearch
+ healthcheckInterval time.Duration // interval between healthchecks
+ healthcheckStop chan bool // notify healthchecker to stop, and notify back
+ snifferEnabled bool // sniffer enabled or disabled
+ snifferTimeoutStartup time.Duration // time the sniffer waits for a response from nodes info API on startup
+ snifferTimeout time.Duration // time the sniffer waits for a response from nodes info API
+ snifferInterval time.Duration // interval between sniffing
+ snifferStop chan bool // notify sniffer to stop, and notify back
+ decoder Decoder // used to decode data sent from Elasticsearch
+// NewClient creates a new client to work with Elasticsearch.
+// The caller can configure the new client by passing configuration options
+// to the func.
+// client, err := elastic.NewClient(
+// elastic.SetURL("http://localhost:9200", "http://localhost:9201"),
+// elastic.SetMaxRetries(10))
+// If no URL is configured, Elastic uses DefaultURL by default.
+// If the sniffer is enabled (the default), the new client then sniffs
+// the cluster via the Nodes Info API
+// (see http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/cluster-nodes-info.html#cluster-nodes-info).
+// It uses the URLs specified by the caller. The caller is responsible
+// to only pass a list of URLs of nodes that belong to the same cluster.
+// This sniffing process is run on startup and periodically.
+// Use SnifferInterval to set the interval between two sniffs (default is
+// 15 minutes). In other words: By default, the client will find new nodes
+// in the cluster and remove those that are no longer available every
+// 15 minutes. Disable the sniffer by passing SetSniff(false) to NewClient.
+// The list of nodes found in the sniffing process will be used to make
+// connections to the REST API of Elasticsearch. These nodes are also
+// periodically checked in a shorter time frame. This process is called
+// a health check. By default, a health check is done every 60 seconds.
+// You can set a shorter or longer interval by SetHealthcheckInterval.
+// Disabling health checks is not recommended, but can be done by
+// SetHealthcheck(false).
+// Connections are automatically marked as dead or healthy while
+// making requests to Elasticsearch. When a request fails, Elastic will
+// retry up to a maximum number of retries configured with SetMaxRetries.
+// Retries are disabled by default.
+// If no HttpClient is configured, then http.DefaultClient is used.
+// You can use your own http.Client with some http.Transport for
+// advanced scenarios.
+// An error is also returned when some configuration option is invalid or
+// the new client cannot sniff the cluster (if enabled).
+func NewClient(options ...ClientOptionFunc) (*Client, error) {
+ // Set up the client
+ c := &Client{
+ c: http.DefaultClient,
+ conns: make([]*conn, 0),
+ cindex: -1,
+ scheme: DefaultScheme,
+ decoder: &DefaultDecoder{},
+ maxRetries: DefaultMaxRetries,
+ healthcheckEnabled: DefaultHealthcheckEnabled,
+ healthcheckTimeoutStartup: DefaultHealthcheckTimeoutStartup,
+ healthcheckTimeout: DefaultHealthcheckTimeout,
+ healthcheckInterval: DefaultHealthcheckInterval,
+ healthcheckStop: make(chan bool),
+ snifferEnabled: DefaultSnifferEnabled,
+ snifferTimeoutStartup: DefaultSnifferTimeoutStartup,
+ snifferTimeout: DefaultSnifferTimeout,
+ snifferInterval: DefaultSnifferInterval,
+ snifferStop: make(chan bool),
+ // Run the options on it
+ for _, option := range options {
+ if err := option(c); err != nil {
+ if len(c.urls) == 0 {
+ c.urls = []string{DefaultURL}
+ c.urls = canonicalize(c.urls...)
+ if c.snifferEnabled {
+ // Sniff the cluster initially
+ if err := c.sniff(c.snifferTimeoutStartup); err != nil {
+ // Do not sniff the cluster initially. Use the provided URLs instead.
+ for _, url := range c.urls {
+ c.conns = append(c.conns, newConn(url, url))
+ // Perform an initial health check and
+ // ensure that we have at least one connection available
+ if c.healthcheckEnabled {
+ c.healthcheck(c.healthcheckTimeoutStartup, true)
+ if err := c.mustActiveConn(); err != nil {
+ go c.sniffer() // periodically update cluster information
+ go c.healthchecker() // start goroutine periodically ping all nodes of the cluster
+ c.mu.Lock()
+ c.running = true
+ c.mu.Unlock()
+ return c, nil
+// SetHttpClient can be used to specify the http.Client to use when making
+// HTTP requests to Elasticsearch.
+func SetHttpClient(httpClient *http.Client) ClientOptionFunc {
+ return func(c *Client) error {
+ if httpClient != nil {
+ c.c = httpClient
+ c.c = http.DefaultClient
+// SetURL defines the URL endpoints of the Elasticsearch nodes. Notice that
+// when sniffing is enabled, these URLs are used to initially sniff the
+// cluster on startup.
+func SetURL(urls ...string) ClientOptionFunc {
+ switch len(urls) {
+ case 0:
+ c.urls = urls
+// SetScheme sets the HTTP scheme to look for when sniffing (http or https).
+// This is http by default.
+func SetScheme(scheme string) ClientOptionFunc {
+ c.scheme = scheme
+// SetSniff enables or disables the sniffer (enabled by default).
+func SetSniff(enabled bool) ClientOptionFunc {
+ c.snifferEnabled = enabled
+// SetSnifferTimeoutStartup sets the timeout for the sniffer that is used
+// when creating a new client. The default is 5 seconds. Notice that the
+// timeout being used for subsequent sniffing processes is set with
+// SetSnifferTimeout.
+func SetSnifferTimeoutStartup(timeout time.Duration) ClientOptionFunc {
+ c.snifferTimeoutStartup = timeout
+// SetSnifferTimeout sets the timeout for the sniffer that finds the
+// nodes in a cluster. The default is 2 seconds. Notice that the timeout
+// used when creating a new client on startup is usually greater and can
+// be set with SetSnifferTimeoutStartup.
+func SetSnifferTimeout(timeout time.Duration) ClientOptionFunc {
+ c.snifferTimeout = timeout
+// SetSnifferInterval sets the interval between two sniffing processes.
+// The default interval is 15 minutes.
+func SetSnifferInterval(interval time.Duration) ClientOptionFunc {
+ c.snifferInterval = interval
+// SetHealthcheck enables or disables healthchecks (enabled by default).
+func SetHealthcheck(enabled bool) ClientOptionFunc {
+ c.healthcheckEnabled = enabled
+// SetHealthcheckTimeoutStartup sets the timeout for the initial health check.
+// The default timeout is 5 seconds (see DefaultHealthcheckTimeoutStartup).
+// Notice that timeouts for subsequent health checks can be modified with
+// SetHealthcheckTimeout.
+func SetHealthcheckTimeoutStartup(timeout time.Duration) ClientOptionFunc {
+ c.healthcheckTimeoutStartup = timeout
+// SetHealthcheckTimeout sets the timeout for periodic health checks.
+// The default timeout is 1 second (see DefaultHealthcheckTimeout).
+// Notice that a different (usually larger) timeout is used for the initial
+// healthcheck, which is initiated while creating a new client.
+// The startup timeout can be modified with SetHealthcheckTimeoutStartup.
+func SetHealthcheckTimeout(timeout time.Duration) ClientOptionFunc {
+ c.healthcheckTimeout = timeout
+// SetHealthcheckInterval sets the interval between two health checks.
+// The default interval is 60 seconds.
+func SetHealthcheckInterval(interval time.Duration) ClientOptionFunc {
+ c.healthcheckInterval = interval
+// SetMaxRetries sets the maximum number of retries before giving up when
+// performing a HTTP request to Elasticsearch.
+func SetMaxRetries(maxRetries int) func(*Client) error {
+ if maxRetries < 0 {
+ return errors.New("MaxRetries must be greater than or equal to 0")
+ c.maxRetries = maxRetries
+// SetDecoder sets the Decoder to use when decoding data from Elasticsearch.
+// DefaultDecoder is used by default.
+func SetDecoder(decoder Decoder) func(*Client) error {
+ if decoder != nil {
+ c.decoder = decoder
+ c.decoder = &DefaultDecoder{}
+// SetErrorLog sets the logger for critical messages like nodes joining
+// or leaving the cluster or failing requests. It is nil by default.
+func SetErrorLog(logger *log.Logger) func(*Client) error {
+ c.errorlog = logger
+// SetInfoLog sets the logger for informational messages, e.g. requests
+// and their response times. It is nil by default.
+func SetInfoLog(logger *log.Logger) func(*Client) error {
+ c.infolog = logger
+// SetTraceLog specifies the log.Logger to use for output of HTTP requests
+// and responses which is helpful during debugging. It is nil by default.
+func SetTraceLog(logger *log.Logger) func(*Client) error {
+ c.tracelog = logger
+// String returns a string representation of the client status.
+func (c *Client) String() string {
+ c.connsMu.Lock()
+ conns := c.conns
+ c.connsMu.Unlock()
+ var buf bytes.Buffer
+ for i, conn := range conns {
+ if i > 0 {
+ buf.WriteString(", ")
+ buf.WriteString(conn.String())
+ return buf.String()
+// IsRunning returns true if the background processes of the client are
+// running, false otherwise.
+func (c *Client) IsRunning() bool {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+ return c.running
+// Start starts the background processes like sniffing the cluster and
+// periodic health checks. You don't need to run Start when creating a
+// client with NewClient; the background processes are run by default.
+// If the background processes are already running, this is a no-op.
+func (c *Client) Start() {
+ if c.running {
+ c.mu.RUnlock()
+ return
+ go c.sniffer()
+ go c.healthchecker()
+ c.infof("elastic: client started")
+// Stop stops the background processes that the client is running,
+// i.e. sniffing the cluster periodically and running health checks
+// on the nodes.
+// If the background processes are not running, this is a no-op.
+func (c *Client) Stop() {
+ if !c.running {
+ c.healthcheckStop <- true
+ <-c.healthcheckStop
+ c.snifferStop <- true
+ <-c.snifferStop
+ c.running = false
+ c.infof("elastic: client stopped")
+// errorf logs to the error log.
+func (c *Client) errorf(format string, args ...interface{}) {
+ if c.errorlog != nil {
+ c.errorlog.Printf(format, args...)
+// infof logs informational messages.
+func (c *Client) infof(format string, args ...interface{}) {
+ if c.infolog != nil {
+ c.infolog.Printf(format, args...)
+// tracef logs to the trace log.
+func (c *Client) tracef(format string, args ...interface{}) {
+ if c.tracelog != nil {
+ c.tracelog.Printf(format, args...)
+// dumpRequest dumps the given HTTP request to the trace log.
+func (c *Client) dumpRequest(r *http.Request) {
+ out, err := httputil.DumpRequestOut(r, true)
+ c.tracef("%s\n", string(out))
+// dumpResponse dumps the given HTTP response to the trace log.
+func (c *Client) dumpResponse(resp *http.Response) {
+ out, err := httputil.DumpResponse(resp, true)
+// sniffer periodically runs sniff.
+func (c *Client) sniffer() {
+ timeout := c.snifferTimeout
+ interval := c.snifferInterval
+ ticker := time.NewTicker(interval)
+ defer ticker.Stop()
+ for {
+ select {
+ case <-c.snifferStop:
+ // we are asked to stop, so we signal back that we're stopping now
+ case <-ticker.C:
+ c.sniff(timeout)
+// sniff uses the Node Info API to return the list of nodes in the cluster.
+// It uses the list of URLs passed on startup plus the list of URLs found
+// by the preceding sniffing process (if sniffing is enabled).
+// If sniffing is disabled, this is a no-op.
+func (c *Client) sniff(timeout time.Duration) error {
+ if !c.snifferEnabled {
+ // Use all available URLs provided to sniff the cluster.
+ urlsMap := make(map[string]bool)
+ urls := make([]string, 0)
+ // Add all URLs provided on startup
+ urlsMap[url] = true
+ urls = append(urls, url)
+ // Add all URLs found by sniffing
+ c.connsMu.RLock()
+ for _, conn := range c.conns {
+ if !conn.IsDead() {
+ url := conn.URL()
+ if _, found := urlsMap[url]; !found {
+ c.connsMu.RUnlock()
+ if len(urls) == 0 {
+ return ErrNoClient
+ // Start sniffing on all found URLs
+ ch := make(chan []*conn, len(urls))
+ for _, url := range urls {
+ go func(url string) { ch <- c.sniffNode(url) }(url)
+ // Wait for the results to come back, or the process times out.
+ case conns := <-ch:
+ if len(conns) > 0 {
+ c.updateConns(conns)
+ case <-time.After(timeout):
+ // We get here if no cluster responds in time
// reSniffHostAndPort is used to extract hostname and port from a result
// from a Nodes Info API (example: "inet[/127.0.0.1:9200]").
// Group 1 captures the host, group 2 the numeric port.
var reSniffHostAndPort = regexp.MustCompile(`\/([^:]*):([0-9]+)\]`)
+// sniffNode sniffs a single node. This method is run as a goroutine
+// in sniff. If successful, it returns the list of node URLs extracted
+// from the result of calling Nodes Info API. Otherwise, an empty array
+// is returned.
+func (c *Client) sniffNode(url string) []*conn {
+ nodes := make([]*conn, 0)
+ // Call the Nodes Info API at /_nodes/http
+ req, err := NewRequest("GET", url+"/_nodes/http")
+ return nodes
+ res, err := c.c.Do((*http.Request)(req))
+ if res.Body != nil {
+ defer res.Body.Close()
+ var info NodesInfoResponse
+ if err := json.NewDecoder(res.Body).Decode(&info); err == nil {
+ if len(info.Nodes) > 0 {
+ switch c.scheme {
+ case "https":
+ for nodeID, node := range info.Nodes {
+ m := reSniffHostAndPort.FindStringSubmatch(node.HTTPSAddress)
+ if len(m) == 3 {
+ url := fmt.Sprintf("https://%s:%s", m[1], m[2])
+ nodes = append(nodes, newConn(nodeID, url))
+ m := reSniffHostAndPort.FindStringSubmatch(node.HTTPAddress)
+ url := fmt.Sprintf("http://%s:%s", m[1], m[2])
+// updateConns updates the clients' connections with new information
+// gather by a sniff operation.
+func (c *Client) updateConns(conns []*conn) {
+ newConns := make([]*conn, 0)
+ // Build up new connections:
+ // If we find an existing connection, use that (including no. of failures etc.).
+ // If we find a new connection, add it.
+ for _, conn := range conns {
+ var found bool
+ for _, oldConn := range c.conns {
+ if oldConn.NodeID() == conn.NodeID() {
+ // Take over the old connection
+ newConns = append(newConns, oldConn)
+ found = true
+ break
+ // New connection didn't exist, so add it to our list of new conns.
+ c.errorf("elastic: %s joined the cluster", conn.URL())
+ newConns = append(newConns, conn)
+ c.conns = newConns
+ c.cindex = -1
+// healthchecker periodically runs healthcheck.
+func (c *Client) healthchecker() {
+ timeout := c.healthcheckTimeout
+ interval := c.healthcheckInterval
+ case <-c.healthcheckStop:
+ c.healthcheck(timeout, false)
+// healthcheck does a health check on all nodes in the cluster. Depending on
+// the node state, it marks connections as dead, sets them alive etc.
+// If healthchecks are disabled and force is false, this is a no-op.
+// The timeout specifies how long to wait for a response from Elasticsearch.
+func (c *Client) healthcheck(timeout time.Duration, force bool) {
+ if !c.healthcheckEnabled && !force {
+ timeoutInMillis := int64(timeout / time.Millisecond)
+ params.Set("timeout", fmt.Sprintf("%dms", timeoutInMillis))
+ req, err := NewRequest("HEAD", conn.URL()+"/?"+params.Encode())
+ if res.StatusCode >= 200 && res.StatusCode < 300 {
+ conn.MarkAsAlive()
+ conn.MarkAsDead()
+ c.errorf("elastic: %s is dead [status=%d]", conn.URL(), res.StatusCode)
+ c.errorf("elastic: %s is dead", conn.URL())
+// next returns the next available connection, or ErrNoClient.
+func (c *Client) next() (*conn, error) {
+ // We do round-robin here.
+ // TODO(oe) This should be a pluggable strategy, like the Selector in the official clients.
+ defer c.connsMu.Unlock()
+ i := 0
+ numConns := len(c.conns)
+ i += 1
+ if i > numConns {
+ break // we visited all conns: they all seem to be dead
+ c.cindex += 1
+ if c.cindex >= numConns {
+ c.cindex = 0
+ conn := c.conns[c.cindex]
+ return conn, nil
+ // TODO(oe) As a last resort, we could try to awake a dead connection here.
+ // We tried hard, but there is no node available
+ return nil, ErrNoClient
+// mustActiveConn returns nil if there is an active connection,
+// otherwise ErrNoClient is returned.
+func (c *Client) mustActiveConn() error {
+ for _, c := range c.conns {
+ if !c.IsDead() {
+// PerformRequest does a HTTP request to Elasticsearch.
+// It returns a response and an error on failure.
+func (c *Client) PerformRequest(method, path string, params url.Values, body interface{}) (*Response, error) {
+ start := time.Now().UTC()
+ retries := c.maxRetries
+ var conn *conn
+ var req *Request
+ var resp *Response
+ var retried bool
+ // We wait between retries, using simple exponential back-off.
+ // TODO: Make this configurable, including the jitter.
+ retryWaitMsec := int64(100 + (rand.Intn(20) - 10))
+ pathWithParams := path
+ if len(params) > 0 {
+ pathWithParams += "?" + params.Encode()
+ // Get a connection
+ conn, err = c.next()
+ if err == ErrNoClient {
+ if !retried {
+ // Force a healtcheck as all connections seem to be dead.
+ retries -= 1
+ if retries <= 0 {
+ retried = true
+ time.Sleep(time.Duration(retryWaitMsec) * time.Millisecond)
+ retryWaitMsec += retryWaitMsec
+ continue // try again
+ c.errorf("elastic: cannot get connection from pool")
+ req, err = NewRequest(method, conn.URL()+pathWithParams)
+ c.errorf("elastic: cannot create request for %s %s: %v", strings.ToUpper(method), conn.URL()+pathWithParams, err)
+ // Set body
+ if body != nil {
+ switch b := body.(type) {
+ req.SetBodyString(b)
+ req.SetBodyJson(body)
+ // Tracing
+ c.dumpRequest((*http.Request)(req))
+ // Check for errors
+ if err := checkResponse(res); err != nil {
+ c.dumpResponse(res)
+ // We successfully made a request with this connection
+ conn.MarkAsHealthy()
+ resp, err = c.newResponse(res)
+ duration := time.Now().UTC().Sub(start)
+ c.infof("%s %s [status:%d, request:%.3fs]",
+ strings.ToUpper(method),
+ req.URL,
+ resp.StatusCode,
+ float64(int64(duration/time.Millisecond))/1000)
+ return resp, nil
+// ElasticsearchVersion returns the version number of Elasticsearch
+// running on the given URL.
+func (c *Client) ElasticsearchVersion(url string) (string, error) {
+ res, _, err := c.Ping().URL(url).Do()
+ return res.Version.Number, nil
+// IndexNames returns the names of all indices in the cluster.
+func (c *Client) IndexNames() ([]string, error) {
+ res, err := c.IndexGetSettings().Index("_all").Do()
+ var names []string
+ for name, _ := range res {
+ names = append(names, name)
+ return names, nil
+// Ping checks if a given node in a cluster exists and (optionally)
+// returns some basic information about the Elasticsearch server,
+// e.g. the Elasticsearch version number.
+func (c *Client) Ping() *PingService {
+ return NewPingService(c)
+// CreateIndex returns a service to create a new index.
+func (c *Client) CreateIndex(name string) *CreateIndexService {
+ builder := NewCreateIndexService(c)
+ builder.Index(name)
+// DeleteIndex returns a service to delete an index.
+func (c *Client) DeleteIndex(name string) *DeleteIndexService {
+ builder := NewDeleteIndexService(c)
+// IndexExists allows to check if an index exists.
+func (c *Client) IndexExists(name string) *IndexExistsService {
+ builder := NewIndexExistsService(c)
+// TypeExists allows to check if one or more types exist in one or more indices.
+func (c *Client) TypeExists() *IndicesExistsTypeService {
+ return NewIndicesExistsTypeService(c)
+// IndexStats provides statistics on different operations happining
+// in one or more indices.
+func (c *Client) IndexStats(indices ...string) *IndicesStatsService {
+ builder := NewIndicesStatsService(c)
+ builder = builder.Index(indices...)
+// OpenIndex opens an index.
+func (c *Client) OpenIndex(name string) *OpenIndexService {
+ builder := NewOpenIndexService(c)
+// CloseIndex closes an index.
+func (c *Client) CloseIndex(name string) *CloseIndexService {
+ builder := NewCloseIndexService(c)
+// Index a document.
+func (c *Client) Index() *IndexService {
+ builder := NewIndexService(c)
+// IndexGet retrieves information about one or more indices.
+// IndexGet is only available for Elasticsearch 1.4 or later.
+func (c *Client) IndexGet() *IndicesGetService {
+ builder := NewIndicesGetService(c)
+// IndexGetSettings retrieves settings about one or more indices.
+func (c *Client) IndexGetSettings() *IndicesGetSettingsService {
+ builder := NewIndicesGetSettingsService(c)
+// Update a document.
+func (c *Client) Update() *UpdateService {
+ builder := NewUpdateService(c)
+// Delete a document.
+func (c *Client) Delete() *DeleteService {
+ builder := NewDeleteService(c)
+// DeleteByQuery deletes documents as found by a query.
+func (c *Client) DeleteByQuery() *DeleteByQueryService {
+ builder := NewDeleteByQueryService(c)
+// Get a document.
+func (c *Client) Get() *GetService {
+ builder := NewGetService(c)
+// MultiGet retrieves multiple documents in one roundtrip.
+func (c *Client) MultiGet() *MultiGetService {
+ builder := NewMultiGetService(c)
+// Exists checks if a document exists.
+func (c *Client) Exists() *ExistsService {
+ builder := NewExistsService(c)
+// Count documents.
+func (c *Client) Count(indices ...string) *CountService {
+ builder := NewCountService(c)
+ builder.Indices(indices...)
+// Search is the entry point for searches.
+func (c *Client) Search(indices ...string) *SearchService {
+ builder := NewSearchService(c)
+// Percolate allows to send a document and return matching queries.
+// See http://www.elastic.co/guide/en/elasticsearch/reference/current/search-percolate.html.
+func (c *Client) Percolate() *PercolateService {
+ builder := NewPercolateService(c)
+// MultiSearch is the entry point for multi searches.
+func (c *Client) MultiSearch() *MultiSearchService {
+ return NewMultiSearchService(c)
+// Suggest returns a service to return suggestions.
+func (c *Client) Suggest(indices ...string) *SuggestService {
+ builder := NewSuggestService(c)
+// Scan through documents. Use this to iterate inside a server process
+// where the results will be processed without returning them to a client.
+func (c *Client) Scan(indices ...string) *ScanService {
+ builder := NewScanService(c)
+// Scroll through documents. Use this to efficiently scroll through results
+// while returning the results to a client. Use Scan when you don't need
+// to return requests to a client (i.e. not paginating via request/response).
+func (c *Client) Scroll(indices ...string) *ScrollService {
+ builder := NewScrollService(c)
+// ClearScroll can be used to clear search contexts manually.
+func (c *Client) ClearScroll() *ClearScrollService {
+ builder := NewClearScrollService(c)
+// Optimize asks Elasticsearch to optimize one or more indices.
+func (c *Client) Optimize(indices ...string) *OptimizeService {
+ builder := NewOptimizeService(c)
+// Refresh asks Elasticsearch to refresh one or more indices.
+func (c *Client) Refresh(indices ...string) *RefreshService {
+ builder := NewRefreshService(c)
+// Flush asks Elasticsearch to free memory from the index and
+// flush data to disk.
+func (c *Client) Flush() *FlushService {
+ builder := NewFlushService(c)
+// Explain computes a score explanation for a query and a specific document.
+func (c *Client) Explain(index, typ, id string) *ExplainService {
+ builder := NewExplainService(c)
+ builder = builder.Index(index).Type(typ).Id(id)
+// Bulk is the entry point to mass insert/update/delete documents.
+func (c *Client) Bulk() *BulkService {
+ builder := NewBulkService(c)
+// Alias enables the caller to add and/or remove aliases.
+func (c *Client) Alias() *AliasService {
+ builder := NewAliasService(c)
+// Aliases returns aliases by index name(s).
+func (c *Client) Aliases() *AliasesService {
+ builder := NewAliasesService(c)
+// GetTemplate gets a search template.
+// Use IndexXXXTemplate funcs to manage index templates.
+func (c *Client) GetTemplate() *GetTemplateService {
+ return NewGetTemplateService(c)
+// PutTemplate creates or updates a search template.
+func (c *Client) PutTemplate() *PutTemplateService {
+ return NewPutTemplateService(c)
+// DeleteTemplate deletes a search template.
+func (c *Client) DeleteTemplate() *DeleteTemplateService {
+ return NewDeleteTemplateService(c)
+// IndexGetTemplate gets an index template.
+// Use XXXTemplate funcs to manage search templates.
+func (c *Client) IndexGetTemplate(names ...string) *IndicesGetTemplateService {
+ builder := NewIndicesGetTemplateService(c)
+ builder = builder.Name(names...)
+// IndexTemplateExists gets check if an index template exists.
+func (c *Client) IndexTemplateExists(name string) *IndicesExistsTemplateService {
+ builder := NewIndicesExistsTemplateService(c)
+ builder = builder.Name(name)
+// IndexPutTemplate creates or updates an index template.
+func (c *Client) IndexPutTemplate(name string) *IndicesPutTemplateService {
+ builder := NewIndicesPutTemplateService(c)
+// IndexDeleteTemplate deletes an index template.
+func (c *Client) IndexDeleteTemplate(name string) *IndicesDeleteTemplateService {
+ builder := NewIndicesDeleteTemplateService(c)
+// GetMapping gets a mapping.
+func (c *Client) GetMapping() *GetMappingService {
+ return NewGetMappingService(c)
+// PutMapping registers a mapping.
+func (c *Client) PutMapping() *PutMappingService {
+ return NewPutMappingService(c)
+// DeleteMapping deletes a mapping.
+func (c *Client) DeleteMapping() *DeleteMappingService {
+ return NewDeleteMappingService(c)
+// ClusterHealth retrieves the health of the cluster.
+func (c *Client) ClusterHealth() *ClusterHealthService {
+ return NewClusterHealthService(c)
+// ClusterState retrieves the state of the cluster.
+func (c *Client) ClusterState() *ClusterStateService {
+ return NewClusterStateService(c)
+// NodesInfo retrieves one or more or all of the cluster nodes information.
+func (c *Client) NodesInfo() *NodesInfoService {
+ return NewNodesInfoService(c)
+// Reindex returns a service that will reindex documents from a source
+// index into a target index. See
+// http://www.elastic.co/guide/en/elasticsearch/guide/current/reindex.html
+// for more information about reindexing.
+func (c *Client) Reindex(sourceIndex, targetIndex string) *Reindexer {
+ return NewReindexer(c, sourceIndex, targetIndex)
@@ -0,0 +1,611 @@
+func findConn(s string, slice ...*conn) (int, bool) {
+ for i, t := range slice {
+ if s == t.URL() {
+ return i, true
+ return -1, false
+// -- NewClient --
+func TestClientDefaults(t *testing.T) {
+ client, err := NewClient()
+ if client.healthcheckEnabled != true {
+ t.Errorf("expected health checks to be enabled, got: %v", client.healthcheckEnabled)
+ if client.healthcheckTimeoutStartup != DefaultHealthcheckTimeoutStartup {
+ t.Errorf("expected health checks timeout on startup = %v, got: %v", DefaultHealthcheckTimeoutStartup, client.healthcheckTimeoutStartup)
+ if client.healthcheckTimeout != DefaultHealthcheckTimeout {
+ t.Errorf("expected health checks timeout = %v, got: %v", DefaultHealthcheckTimeout, client.healthcheckTimeout)
+ if client.healthcheckInterval != DefaultHealthcheckInterval {
+ t.Errorf("expected health checks interval = %v, got: %v", DefaultHealthcheckInterval, client.healthcheckInterval)
+ if client.snifferEnabled != true {
+ t.Errorf("expected sniffing to be enabled, got: %v", client.snifferEnabled)
+ if client.snifferTimeoutStartup != DefaultSnifferTimeoutStartup {
+ t.Errorf("expected sniffer timeout on startup = %v, got: %v", DefaultSnifferTimeoutStartup, client.snifferTimeoutStartup)
+ if client.snifferTimeout != DefaultSnifferTimeout {
+ t.Errorf("expected sniffer timeout = %v, got: %v", DefaultSnifferTimeout, client.snifferTimeout)
+ if client.snifferInterval != DefaultSnifferInterval {
+ t.Errorf("expected sniffer interval = %v, got: %v", DefaultSnifferInterval, client.snifferInterval)
+func TestClientWithoutURL(t *testing.T) {
+ // Two things should happen here:
+ // 1. The client starts sniffing the cluster on DefaultURL
+ // 2. The sniffing process should find (at least) one node in the cluster, i.e. the DefaultURL
+ if len(client.conns) == 0 {
+ t.Fatalf("expected at least 1 node in the cluster, got: %d (%v)", len(client.conns), client.conns)
+ if !isTravis() {
+ if _, found := findConn(DefaultURL, client.conns...); !found {
+ t.Errorf("expected to find node with default URL of %s in %v", DefaultURL, client.conns)
+func TestClientWithSingleURL(t *testing.T) {
+ client, err := NewClient(SetURL("http://localhost:9200"))
+func TestClientWithMultipleURLs(t *testing.T) {
+ client, err := NewClient(SetURL("http://localhost:9200", "http://localhost:9201"))
+ // The client should sniff both URLs, but only localhost:9200 should return nodes.
+ if len(client.conns) != 1 {
+ t.Fatalf("expected exactly 1 node in the local cluster, got: %d (%v)", len(client.conns), client.conns)
+ if client.conns[0].URL() != DefaultURL {
+func TestClientSniffSuccess(t *testing.T) {
+ client, err := NewClient(SetURL("http://localhost:19200", "http://localhost:9200"))
+func TestClientSniffFailure(t *testing.T) {
+ _, err := NewClient(SetURL("http://localhost:19200", "http://localhost:19201"))
+ t.Fatalf("expected cluster to fail with no nodes found")
+func TestClientSniffDisabled(t *testing.T) {
+ client, err := NewClient(SetSniff(false), SetURL("http://localhost:9200", "http://localhost:9201"))
+ // The client should not sniff, so it should have two connections.
+ if len(client.conns) != 2 {
+ t.Fatalf("expected 2 nodes, got: %d (%v)", len(client.conns), client.conns)
+ // Make two requests, so that both connections are being used
+ for i := 0; i < len(client.conns); i++ {
+ _, err = client.Flush().Do()
+ // The first connection (localhost:9200) should now be okay.
+ if i, found := findConn("http://localhost:9200", client.conns...); !found {
+ t.Fatalf("expected connection to %q to be found", "http://localhost:9200")
+ if conn := client.conns[i]; conn.IsDead() {
+ t.Fatal("expected connection to be alive, but it is dead")
+ // The second connection (localhost:9201) should now be marked as dead.
+ if i, found := findConn("http://localhost:9201", client.conns...); !found {
+ t.Fatalf("expected connection to %q to be found", "http://localhost:9201")
+ if conn := client.conns[i]; !conn.IsDead() {
+ t.Fatal("expected connection to be dead, but it is alive")
+// -- Start and stop --
+func TestClientStartAndStop(t *testing.T) {
+ running := client.IsRunning()
+ if !running {
+ t.Fatalf("expected background processes to run; got: %v", running)
+ // Stop
+ client.Stop()
+ running = client.IsRunning()
+ if running {
+ t.Fatalf("expected background processes to be stopped; got: %v", running)
+ // Stop again => no-op
+ // Start
+ client.Start()
+ // Start again => no-op
+// -- Sniffing --
+func TestClientSniffNode(t *testing.T) {
+ ch := make(chan []*conn)
+ go func() { ch <- client.sniffNode(DefaultURL) }()
+ case nodes := <-ch:
+ if len(nodes) != 1 {
+ t.Fatalf("expected %d nodes; got: %d", 1, len(nodes))
+ pattern := `http:\/\/[\d\.]+:9200`
+ matched, err := regexp.MatchString(pattern, nodes[0].URL())
+ if !matched {
+ t.Fatalf("expected node URL pattern %q; got: %q", pattern, nodes[0].URL())
+ case <-time.After(2 * time.Second):
+ t.Fatal("expected no timeout in sniff node")
+func TestClientSniffOnDefaultURL(t *testing.T) {
+ client, _ := NewClient()
+ if client == nil {
+ t.Fatal("no client returned")
+ ch := make(chan error, 1)
+ go func() {
+ ch <- client.sniff(DefaultSnifferTimeoutStartup)
+ }()
+ case err := <-ch:
+ t.Fatalf("expected sniff to succeed; got: %v", err)
+ t.Fatalf("expected %d nodes; got: %d", 1, len(client.conns))
+ matched, err := regexp.MatchString(pattern, client.conns[0].URL())
+ t.Fatalf("expected node URL pattern %q; got: %q", pattern, client.conns[0].URL())
+ t.Fatal("expected no timeout in sniff")
+// -- Selector --
+func TestClientSelectConnHealthy(t *testing.T) {
+ client, err := NewClient(
+ SetSniff(false),
+ SetHealthcheck(false),
+ SetURL("http://127.0.0.1:9200", "http://127.0.0.1:9201"))
+ // Both are healthy, so we should get both URLs in round-robin
+ client.conns[0].MarkAsHealthy()
+ client.conns[1].MarkAsHealthy()
+ // #1: Return 1st
+ c, err := client.next()
+ if c.URL() != client.conns[0].URL() {
+ t.Fatalf("expected %s; got: %s", c.URL(), client.conns[0].URL())
+ // #2: Return 2nd
+ c, err = client.next()
+ if c.URL() != client.conns[1].URL() {
+ t.Fatalf("expected %s; got: %s", c.URL(), client.conns[1].URL())
+ // #3: Return 1st
+func TestClientSelectConnHealthyAndDead(t *testing.T) {
+ // 1st is healthy, second is dead
+ client.conns[1].MarkAsDead()
+ // #2: Return 1st again
+ // #3: Return 1st again and again
+func TestClientSelectConnDeadAndHealthy(t *testing.T) {
+ // 1st is dead, 2nd is healthy
+ client.conns[0].MarkAsDead()
+ // #1: Return 2nd
+ // #2: Return 2nd again
+ // #3: Return 2nd again and again
+func TestClientSelectConnAllDead(t *testing.T) {
+ // Both are dead
+ // #1: Return ErrNoClient
+ if err != ErrNoClient {
+ if c != nil {
+ t.Fatalf("expected no connection; got: %v", c)
+ // #2: Return ErrNoClient again
+ // #3: Return ErrNoClient again and again
+// -- ElasticsearchVersion --
+func TestElasticsearchVersion(t *testing.T) {
+ version, err := client.ElasticsearchVersion(DefaultURL)
+ if version == "" {
+ t.Errorf("expected a version number, got: %q", version)
+// -- IndexNames --
+func TestIndexNames(t *testing.T) {
+ names, err := client.IndexNames()
+ if len(names) == 0 {
+ t.Fatalf("expected some index names, got: %d", len(names))
+ for _, name := range names {
+ if name == testIndexName {
+ t.Fatalf("expected to find index %q; got: %v", testIndexName, found)
+// -- PerformRequest --
+func TestPerformRequest(t *testing.T) {
+ res, err := client.PerformRequest("GET", "/", nil, nil)
+ t.Fatal("expected response to be != nil")
+ ret := new(PingResult)
+ t.Fatalf("expected no error on decode; got: %v", err)
+ if ret.Status != 200 {
+ t.Errorf("expected HTTP status 200; got: %d", ret.Status)
+func TestPerformRequestWithLogger(t *testing.T) {
+ var w bytes.Buffer
+ out := log.New(&w, "LOGGER ", log.LstdFlags)
+ client, err := NewClient(SetInfoLog(out))
+ got := w.String()
+ pattern := `^LOGGER \d{4}/\d{2}/\d{2} \d{2}:\d{2}:\d{2} GET http://.*/ \[status:200, request:\d+\.\d{3}s\]\n`
+ matched, err := regexp.MatchString(pattern, got)
+ t.Fatalf("expected log line to match %q; got: %v", pattern, err)
+ t.Errorf("expected log line to match %q; got: %v", pattern, got)
+func TestPerformRequestWithLoggerAndTracer(t *testing.T) {
+ var lw bytes.Buffer
+ lout := log.New(&lw, "LOGGER ", log.LstdFlags)
+ var tw bytes.Buffer
+ tout := log.New(&tw, "TRACER ", log.LstdFlags)
+ client, err := NewClient(SetInfoLog(lout), SetTraceLog(tout))
+ lgot := lw.String()
+ if lgot == "" {
+ t.Errorf("expected logger output; got: %q", lgot)
+ tgot := tw.String()
+ if tgot == "" {
+ t.Errorf("expected tracer output; got: %q", tgot)
+// failingTransport will run a fail callback if it sees a given URL path prefix.
+type failingTransport struct {
+ path string // path prefix to look for
+ fail func(*http.Request) (*http.Response, error) // call when path prefix is found
+ next http.RoundTripper // next round-tripper (use http.DefaultTransport if nil)
+// RoundTrip implements a failing transport.
+func (tr *failingTransport) RoundTrip(r *http.Request) (*http.Response, error) {
+ if strings.HasPrefix(r.URL.Path, tr.path) && tr.fail != nil {
+ return tr.fail(r)
+ if tr.next != nil {
+ return tr.next.RoundTrip(r)
+ return http.DefaultTransport.RoundTrip(r)
+func TestPerformRequestWithMaxRetries(t *testing.T) {
+ var numFailedReqs int
+ fail := func(r *http.Request) (*http.Response, error) {
+ numFailedReqs += 1
+ return &http.Response{Request: r, StatusCode: 400}, nil
+ // Run against a failing endpoint and see if PerformRequest
+ // retries correctly.
+ tr := &failingTransport{path: "/fail", fail: fail}
+ httpClient := &http.Client{Transport: tr}
+ client, err := NewClient(SetHttpClient(httpClient), SetMaxRetries(5))
+ res, err := client.PerformRequest("GET", "/fail", nil, nil)
+ t.Fatal("expected error")
+ if res != nil {
+ t.Fatal("expected no response")
+ // Connection should be marked as dead after it failed
+ if numFailedReqs != 5 {
+ t.Errorf("expected %d failed requests; got: %d", 5, numFailedReqs)
@@ -0,0 +1,16 @@
+.PHONY: build run-omega-cluster-test
+default: build
+build:
+ go build cluster-test.go
+run-omega-cluster-test:
+ go run -race cluster-test.go \
+ -nodes=http://192.168.2.65:8200,http://192.168.2.64:8200 \
+ -n=5 \
+ -retries=5 \
+ -sniff=true -sniffer=10s \
+ -healthcheck=true -healthchecker=5s \
+ -errorlog=errors.log
+# Cluster Test
+This directory contains a program you can use to test a cluster.
+Here's how:
+First, install a cluster of Elasticsearch nodes. You can install them on
+different computers, or start several nodes on a single machine.
+Build cluster-test by `go build cluster-test.go` (or build with `make`).
+Run `./cluster-test -h` to get a list of flags:
+$ ./cluster-test -h
+Usage of ./cluster-test:
+ -errorlog="": error log file
+ -healthcheck=true: enable or disable healthchecks
+ -healthchecker=1m0s: healthcheck interval
+ -index="twitter": name of ES index to use
+ -infolog="": info log file
+ -n=5: number of goroutines that run searches
+ -nodes="": comma-separated list of ES URLs (e.g. 'http://192.168.2.10:9200,http://192.168.2.11:9200')
+ -retries=0: number of retries
+ -sniff=true: enable or disable sniffer
+ -sniffer=15m0s: sniffer interval
+ -tracelog="": trace log file
+Example:
+$ ./cluster-test -nodes=http://127.0.0.1:9200,http://127.0.0.1:9201,http://127.0.0.1:9202 -n=5 -index=twitter -retries=5 -sniff=true -sniffer=10s -healthcheck=true -healthchecker=5s -errorlog=error.log
+The above example will create an index and start some search jobs on the
+cluster defined by http://127.0.0.1:9200, http://127.0.0.1:9201,
+and http://127.0.0.1:9202.
+* It will create an index called `twitter` on the cluster (`-index=twitter`)
+* It will run 5 search jobs in parallel (`-n=5`).
+* It will retry failed requests 5 times (`-retries=5`).
+* It will sniff the cluster periodically (`-sniff=true`).
+* It will sniff the cluster every 10 seconds (`-sniffer=10s`).
+* It will perform health checks periodically (`-healthcheck=true`).
+* It will perform health checks on the nodes every 5 seconds (`-healthchecker=5s`).
+* It will write an error log file (`-errorlog=error.log`).
+If you want to test Elastic with nodes going up and down, you can use a
+chaos monkey script like this and run it on the nodes of your cluster:
+#!/bin/bash
+while true
+do
+ echo "Starting ES node"
+ elasticsearch -d -Xmx4g -Xms1g -Des.config=elasticsearch.yml -p es.pid
+ sleep `jot -r 1 10 300` # wait for 10-300s
+ echo "Stopping ES node"
+ kill -TERM `cat es.pid`
+ sleep `jot -r 1 10 60` # wait for 10-60s
+done
@@ -0,0 +1,357 @@
+package main
+ "flag"
+ "os"
+ "runtime"
+ "sync/atomic"
+ "github.com/olivere/elastic"
+type Tweet struct {
+ User string `json:"user"`
+ Message string `json:"message"`
+ Image string `json:"image,omitempty"`
+ Created time.Time `json:"created,omitempty"`
+ Tags []string `json:"tags,omitempty"`
+ Location string `json:"location,omitempty"`
+ Suggest *elastic.SuggestField `json:"suggest_field,omitempty"`
+ nodes = flag.String("nodes", "", "comma-separated list of ES URLs (e.g. 'http://192.168.2.10:9200,http://192.168.2.11:9200')")
+ n = flag.Int("n", 5, "number of goroutines that run searches")
+ index = flag.String("index", "twitter", "name of ES index to use")
+ errorlogfile = flag.String("errorlog", "", "error log file")
+ infologfile = flag.String("infolog", "", "info log file")
+ tracelogfile = flag.String("tracelog", "", "trace log file")
+ retries = flag.Int("retries", elastic.DefaultMaxRetries, "number of retries")
+ sniff = flag.Bool("sniff", elastic.DefaultSnifferEnabled, "enable or disable sniffer")
+ sniffer = flag.Duration("sniffer", elastic.DefaultSnifferInterval, "sniffer interval")
+ healthcheck = flag.Bool("healthcheck", elastic.DefaultHealthcheckEnabled, "enable or disable healthchecks")
+ healthchecker = flag.Duration("healthchecker", elastic.DefaultHealthcheckInterval, "healthcheck interval")
+func main() {
+ flag.Parse()
+ runtime.GOMAXPROCS(runtime.NumCPU())
+ if *nodes == "" {
+ log.Fatal("no nodes specified")
+ urls := strings.SplitN(*nodes, ",", -1)
+ testcase, err := NewTestCase(*index, urls)
+ log.Fatal(err)
+ testcase.SetErrorLogFile(*errorlogfile)
+ testcase.SetInfoLogFile(*infologfile)
+ testcase.SetTraceLogFile(*tracelogfile)
+ testcase.SetMaxRetries(*retries)
+ testcase.SetHealthcheck(*healthcheck)
+ testcase.SetHealthcheckInterval(*healthchecker)
+ testcase.SetSniff(*sniff)
+ testcase.SetSnifferInterval(*sniffer)
+ if err := testcase.Run(*n); err != nil {
+ select {}
+type RunInfo struct {
+ Success bool
+type TestCase struct {
+ nodes []string
+ client *elastic.Client
+ runs int64
+ failures int64
+ runCh chan RunInfo
+ errorlogfile string
+ infologfile string
+ tracelogfile string
+ maxRetries int
+ healthcheck bool
+ healthcheckInterval time.Duration
+ sniff bool
+ snifferInterval time.Duration
+func NewTestCase(index string, nodes []string) (*TestCase, error) {
+ if index == "" {
+ return nil, errors.New("no index name specified")
+ return &TestCase{
+ index: index,
+ nodes: nodes,
+ runCh: make(chan RunInfo),
+ }, nil
+func (t *TestCase) SetIndex(name string) {
+ t.index = name
+func (t *TestCase) SetErrorLogFile(name string) {
+ t.errorlogfile = name
+func (t *TestCase) SetInfoLogFile(name string) {
+ t.infologfile = name
+func (t *TestCase) SetTraceLogFile(name string) {
+ t.tracelogfile = name
+func (t *TestCase) SetMaxRetries(n int) {
+ t.maxRetries = n
+func (t *TestCase) SetSniff(enabled bool) {
+ t.sniff = enabled
+func (t *TestCase) SetSnifferInterval(d time.Duration) {
+ t.snifferInterval = d
+func (t *TestCase) SetHealthcheck(enabled bool) {
+ t.healthcheck = enabled
+func (t *TestCase) SetHealthcheckInterval(d time.Duration) {
+ t.healthcheckInterval = d
+func (t *TestCase) Run(n int) error {
+ if err := t.setup(); err != nil {
+ return err
+ for i := 1; i < n; i++ {
+ go t.search()
+ go t.monitor()
+func (t *TestCase) monitor() {
+ print := func() {
+ fmt.Printf("\033[32m%5d\033[0m; \033[31m%5d\033[0m: %s%s\r", t.runs, t.failures, t.client.String(), " ")
+ case run := <-t.runCh:
+ atomic.AddInt64(&t.runs, 1)
+ if !run.Success {
+ atomic.AddInt64(&t.failures, 1)
+ fmt.Println()
+ print()
+ case <-time.After(5 * time.Second):
+ // Print stats after some inactivity
+func (t *TestCase) setup() error {
+ var errorlogger *log.Logger
+ if t.errorlogfile != "" {
+ f, err := os.OpenFile(t.errorlogfile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0664)
+ errorlogger = log.New(f, "", log.Ltime|log.Lmicroseconds|log.Lshortfile)
+ var infologger *log.Logger
+ if t.infologfile != "" {
+ f, err := os.OpenFile(t.infologfile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0664)
+ infologger = log.New(f, "", log.LstdFlags)
+ // Trace request and response details like this
+ var tracelogger *log.Logger
+ if t.tracelogfile != "" {
+ f, err := os.OpenFile(t.tracelogfile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0664)
+ tracelogger = log.New(f, "", log.LstdFlags)
+ client, err := elastic.NewClient(
+ elastic.SetURL(t.nodes...),
+ elastic.SetErrorLog(errorlogger),
+ elastic.SetInfoLog(infologger),
+ elastic.SetTraceLog(tracelogger),
+ elastic.SetMaxRetries(t.maxRetries),
+ elastic.SetSniff(t.sniff),
+ elastic.SetSnifferInterval(t.snifferInterval),
+ elastic.SetHealthcheck(t.healthcheck),
+ elastic.SetHealthcheckInterval(t.healthcheckInterval))
+ t.client = client
+ // Use the IndexExists service to check if a specified index exists.
+ exists, err := t.client.IndexExists(t.index).Do()
+ deleteIndex, err := t.client.DeleteIndex(t.index).Do()
+ if !deleteIndex.Acknowledged {
+ return errors.New("delete index not acknowledged")
+ // Create a new index.
+ createIndex, err := t.client.CreateIndex(t.index).Do()
+ if !createIndex.Acknowledged {
+ return errors.New("create index not acknowledged")
+ // Index a tweet (using JSON serialization)
+ tweet1 := Tweet{User: "olivere", Message: "Take Five", Retweets: 0}
+ _, err = t.client.Index().
+ Index(t.index).
+ BodyJson(tweet1).
+ // Index a second tweet (by string)
+ tweet2 := `{"user" : "olivere", "message" : "It's a Raggy Waltz"}`
+ Id("2").
+ BodyString(tweet2).
+ // Flush to make sure the documents got written.
+ _, err = t.client.Flush().Index(t.index).Do()
+func (t *TestCase) search() {
+ // Loop forever to check for connection issues
+ // Get tweet with specified ID
+ get1, err := t.client.Get().
+ //failf("Get failed: %v", err)
+ t.runCh <- RunInfo{Success: false}
+ continue
+ if !get1.Found {
+ //log.Printf("Document %s not found\n", "1")
+ //fmt.Printf("Got document %s in version %d from index %s, type %s\n", get1.Id, get1.Version, get1.Index, get1.Type)
+ // Search with a term query
+ termQuery := elastic.NewTermQuery("user", "olivere")
+ searchResult, err := t.client.Search().
+ Index(t.index). // search in index t.index
+ //failf("Search failed: %v\n", err)
+ // searchResult is of type SearchResult and returns hits, suggestions,
+ // and all kinds of other information from Elasticsearch.
+ //fmt.Printf("Query took %d milliseconds\n", searchResult.TookInMillis)
+ // Number of hits
+ if searchResult.Hits != nil {
+ //fmt.Printf("Found a total of %d tweets\n", searchResult.Hits.TotalHits)
+ var tweet Tweet
+ err := json.Unmarshal(*hit.Source, &tweet)
+ //failf("Deserialize failed: %v\n", err)
+ //fmt.Printf("Tweet by %s: %s\n", t.User, t.Message)
+ //fmt.Print("Found no tweets\n")
+ t.runCh <- RunInfo{Success: true}
+ // Sleep some time
+ time.Sleep(time.Duration(rand.Intn(500)) * time.Millisecond)
@@ -0,0 +1,185 @@
+// ClusterHealthService allows getting the status of the cluster.
+// It is documented at http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/cluster-health.html.
+type ClusterHealthService struct {
+ waitForStatus string
+ level string
+ local *bool
+ masterTimeout string
+ waitForActiveShards *int
+ waitForNodes string
+ waitForRelocatingShards *int
+// NewClusterHealthService creates a new ClusterHealthService.
+func NewClusterHealthService(client *Client) *ClusterHealthService {
+ return &ClusterHealthService{client: client, indices: make([]string, 0)}
+// Index limits the information returned to a specific index.
+func (s *ClusterHealthService) Index(index string) *ClusterHealthService {
+ s.indices = make([]string, 0)
+ s.indices = append(s.indices, index)
+// Indices limits the information returned to specific indices.
+func (s *ClusterHealthService) Indices(indices ...string) *ClusterHealthService {
+ s.indices = append(s.indices, indices...)
+// MasterTimeout specifies an explicit operation timeout for connection to master node.
+func (s *ClusterHealthService) MasterTimeout(masterTimeout string) *ClusterHealthService {
+ s.masterTimeout = masterTimeout
+// Timeout specifies an explicit operation timeout.
+func (s *ClusterHealthService) Timeout(timeout string) *ClusterHealthService {
+// WaitForActiveShards can be used to wait until the specified number of shards are active.
+func (s *ClusterHealthService) WaitForActiveShards(waitForActiveShards int) *ClusterHealthService {
+ s.waitForActiveShards = &waitForActiveShards
+// WaitForNodes can be used to wait until the specified number of nodes are available.
+func (s *ClusterHealthService) WaitForNodes(waitForNodes string) *ClusterHealthService {
+ s.waitForNodes = waitForNodes
+// WaitForRelocatingShards can be used to wait until the specified number of relocating shards is finished.
+func (s *ClusterHealthService) WaitForRelocatingShards(waitForRelocatingShards int) *ClusterHealthService {
+ s.waitForRelocatingShards = &waitForRelocatingShards
+// WaitForStatus can be used to wait until the cluster is in a specific state.
+// Valid values are: green, yellow, or red.
+func (s *ClusterHealthService) WaitForStatus(waitForStatus string) *ClusterHealthService {
+ s.waitForStatus = waitForStatus
+// Level specifies the level of detail for returned information.
+func (s *ClusterHealthService) Level(level string) *ClusterHealthService {
+ s.level = level
+// Local indicates whether to return local information. If it is true,
+// we do not retrieve the state from the master node (default: false).
+func (s *ClusterHealthService) Local(local bool) *ClusterHealthService {
+ s.local = &local
+func (s *ClusterHealthService) buildURL() (string, url.Values, error) {
+ // Build URL
+ path, err := uritemplates.Expand("/_cluster/health/{index}", map[string]string{
+ "index": strings.Join(s.indices, ","),
+ // Add query string parameters
+ params := url.Values{}
+ if s.waitForRelocatingShards != nil {
+ params.Set("wait_for_relocating_shards", fmt.Sprintf("%d", *s.waitForRelocatingShards))
+ if s.waitForStatus != "" {
+ params.Set("wait_for_status", s.waitForStatus)
+ if s.level != "" {
+ params.Set("level", s.level)
+ if s.local != nil {
+ params.Set("local", fmt.Sprintf("%v", *s.local))
+ if s.masterTimeout != "" {
+ params.Set("master_timeout", s.masterTimeout)
+ if s.waitForActiveShards != nil {
+ params.Set("wait_for_active_shards", fmt.Sprintf("%d", *s.waitForActiveShards))
+ if s.waitForNodes != "" {
+ params.Set("wait_for_nodes", s.waitForNodes)
+ return path, params, nil
+func (s *ClusterHealthService) Validate() error {
+func (s *ClusterHealthService) Do() (*ClusterHealthResponse, error) {
+ resp := new(ClusterHealthResponse)
+ if err := json.Unmarshal(res.Body, resp); err != nil {
+// ClusterHealthResponse is the response of ClusterHealthService.Do.
+type ClusterHealthResponse struct {
+ ClusterName string `json:"cluster_name"`
+ Status string `json:"status"`
+ TimedOut bool `json:"timed_out"`
+ NumberOfNodes int `json:"number_of_nodes"`
+ NumberOfDataNodes int `json:"number_of_data_nodes"`
+ ActivePrimaryShards int `json:"active_primary_shards"`
+ ActiveShards int `json:"active_shards"`
+ RelocatingShards int `json:"relocating_shards"`
+ InitializedShards int `json:"initialized_shards"`
+ UnassignedShards int `json:"unassigned_shards"`
@@ -0,0 +1,74 @@
+func TestClusterHealth(t *testing.T) {
+ // Get cluster health
+ res, err := client.ClusterHealth().Index(testIndexName).Do()
+ t.Fatalf("expected res to be != nil; got: %v", res)
+ if res.Status != "green" && res.Status != "red" && res.Status != "yellow" {
+ t.Fatalf("expected status \"green\", \"red\", or \"yellow\"; got: %q", res.Status)
+func TestClusterHealthURLs(t *testing.T) {
+ Service *ClusterHealthService
+ ExpectedPath string
+ ExpectedParams url.Values
+ Service: &ClusterHealthService{
+ indices: []string{},
+ ExpectedPath: "/_cluster/health/",
+ indices: []string{"twitter"},
+ ExpectedPath: "/_cluster/health/twitter",
+ indices: []string{"twitter", "gplus"},
+ ExpectedPath: "/_cluster/health/twitter%2Cgplus",
+ waitForStatus: "yellow",
+ ExpectedParams: url.Values{"wait_for_status": []string{"yellow"}},
+ gotPath, gotParams, err := test.Service.buildURL()
+ t.Fatalf("expected no error; got: %v", err)
+ if gotPath != test.ExpectedPath {
+ t.Errorf("expected URL path = %q; got: %q", test.ExpectedPath, gotPath)
+ if gotParams.Encode() != test.ExpectedParams.Encode() {
+ t.Errorf("expected URL params = %v; got: %v", test.ExpectedParams, gotParams)
@@ -0,0 +1,192 @@
+// ClusterStateService returns the state of the cluster.
+// It is documented at http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/cluster-state.html.
+type ClusterStateService struct {
+ metrics []string
+ flatSettings *bool
+// NewClusterStateService creates a new ClusterStateService.
+func NewClusterStateService(client *Client) *ClusterStateService {
+ return &ClusterStateService{
+ metrics: make([]string, 0),
+// Index sets the name of the index. Use _all or an empty string to perform
+// the operation on all indices.
+func (s *ClusterStateService) Index(index string) *ClusterStateService {
+// Indices is a list of index names. Use _all or an empty string to
+// perform the operation on all indices.
+func (s *ClusterStateService) Indices(indices ...string) *ClusterStateService {
+// Metric limits the information returned to the specified metric.
+// It can be one of: version, master_node, nodes, routing_table, metadata,
+// blocks, or customs.
+func (s *ClusterStateService) Metric(metric string) *ClusterStateService {
+ s.metrics = make([]string, 0)
+ s.metrics = append(s.metrics, metric)
+// Metrics limits the information returned to the specified metrics.
+// It can be any of: version, master_node, nodes, routing_table, metadata,
+func (s *ClusterStateService) Metrics(metrics ...string) *ClusterStateService {
+ s.metrics = append(s.metrics, metrics...)
+func (s *ClusterStateService) Local(local bool) *ClusterStateService {
+// MasterTimeout specifies the timeout for connection to master.
+func (s *ClusterStateService) MasterTimeout(masterTimeout string) *ClusterStateService {
+// FlatSettings indicates whether to return settings in flat format (default: false).
+func (s *ClusterStateService) FlatSettings(flatSettings bool) *ClusterStateService {
+ s.flatSettings = &flatSettings
+func (s *ClusterStateService) buildURL() (string, url.Values, error) {
+ metrics := strings.Join(s.metrics, ",")
+ if metrics == "" {
+ metrics = "_all"
+ indices := strings.Join(s.indices, ",")
+ if indices == "" {
+ indices = "_all"
+ path, err := uritemplates.Expand("/_cluster/state/{metrics}/{indices}", map[string]string{
+ "metrics": metrics,
+ "indices": indices,
+ if s.flatSettings != nil {
+ params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
+func (s *ClusterStateService) Validate() error {
+func (s *ClusterStateService) Do() (*ClusterStateResponse, error) {
+ ret := new(ClusterStateResponse)
+// ClusterStateResponse is the response of ClusterStateService.Do.
+type ClusterStateResponse struct {
+ Version int `json:"version"`
+ MasterNode string `json:"master_node"`
+ Blocks map[string]interface{} `json:"blocks"`
+ Nodes map[string]*ClusterStateNode `json:"nodes"`
+ Metadata *ClusterStateMetadata `json:"metadata"`
+ RoutingTable map[string]*ClusterStateRoutingTable `json:"routing_table"`
+ RoutingNodes *ClusterStateRoutingNode `json:"routing_nodes"`
+ Allocations []interface{} `json:"allocations"`
+ Customs map[string]interface{} `json:"customs"`
+type ClusterStateMetadata struct {
+ Templates map[string]interface{} `json:"templates"`
+ Indices map[string]interface{} `json:"indices"`
+ Repositories map[string]interface{} `json:"repositories"`
+type ClusterStateNode struct {
+ State string `json:"state"`
+ Primary bool `json:"primary"`
+ Node string `json:"node"`
+ RelocatingNode *string `json:"relocating_node"`
+ Shard int `json:"shard"`
+ Index string `json:"index"`
+type ClusterStateRoutingTable struct {
+type ClusterStateRoutingNode struct {
+ Unassigned []interface{} `json:"unassigned"`
+ Nodes map[string]interface{} `json:"nodes"`
@@ -0,0 +1,92 @@
+func TestClusterState(t *testing.T) {
+ // Get cluster state
+ res, err := client.ClusterState().Do()
+ if res.ClusterName == "" {
+ t.Fatalf("expected a cluster name; got: %q", res.ClusterName)
+func TestClusterStateURLs(t *testing.T) {
+ Service *ClusterStateService
+ Service: &ClusterStateService{
+ metrics: []string{},
+ ExpectedPath: "/_cluster/state/_all/_all",
+ ExpectedPath: "/_cluster/state/_all/twitter",
+ ExpectedPath: "/_cluster/state/_all/twitter%2Cgplus",
+ metrics: []string{"nodes"},
+ ExpectedPath: "/_cluster/state/nodes/_all",
+ ExpectedPath: "/_cluster/state/nodes/twitter",
+ masterTimeout: "1s",
+ ExpectedParams: url.Values{"master_timeout": []string{"1s"}},
@@ -0,0 +1,90 @@
+// conn represents a single connection to a node in a cluster.
+type conn struct {
+ sync.RWMutex
+ nodeID string // node ID
+ url string
+ failures int
+ dead bool
+ deadSince *time.Time
+// newConn creates a new connection to the given URL.
+func newConn(nodeID, url string) *conn {
+ c := &conn{
+ nodeID: nodeID,
+ url: url,
+ return c
+// String returns a representation of the connection status.
+func (c *conn) String() string {
+ c.RLock()
+ defer c.RUnlock()
+ return fmt.Sprintf("%s [dead=%v,failures=%d,deadSince=%v]", c.url, c.dead, c.failures, c.deadSince)
+// NodeID returns the ID of the node of this connection.
+func (c *conn) NodeID() string {
+ return c.nodeID
+// URL returns the URL of this connection.
+func (c *conn) URL() string {
+ return c.url
+// IsDead returns true if this connection is marked as dead, i.e. a previous
+// request to the URL has been unsuccessful.
+func (c *conn) IsDead() bool {
+ return c.dead
+// MarkAsDead marks this connection as dead, increments the failures
+// counter and stores the current time in dead since.
+func (c *conn) MarkAsDead() {
+ c.Lock()
+ c.dead = true
+ if c.deadSince == nil {
+ utcNow := time.Now().UTC()
+ c.deadSince = &utcNow
+ c.failures += 1
+ c.Unlock()
+// MarkAsAlive marks this connection as eligible to be returned from the
+// pool of connections by the selector.
+func (c *conn) MarkAsAlive() {
+ c.dead = false
+// MarkAsHealthy marks this connection as healthy, i.e. a request has been
+// successfully performed with it.
+func (c *conn) MarkAsHealthy() {
+ c.deadSince = nil
+ c.failures = 0
@@ -0,0 +1,152 @@
+// CountService is a convenient service for determining the
+// number of documents in an index. Use SearchService with
+// a SearchType of count for counting with queries etc.
+type CountService struct {
+ types []string
+ query Query
+// CountResult is the result returned from using the Count API
+// (http://www.elasticsearch.org/guide/reference/api/count/)
+type CountResult struct {
+ Count int64 `json:"count"`
+ Shards shardsInfo `json:"_shards,omitempty"`
+func NewCountService(client *Client) *CountService {
+ builder := &CountService{
+func (s *CountService) Index(index string) *CountService {
+ if s.indices == nil {
+func (s *CountService) Indices(indices ...string) *CountService {
+func (s *CountService) Type(typ string) *CountService {
+ if s.types == nil {
+ s.types = make([]string, 0)
+ s.types = append(s.types, typ)
+func (s *CountService) Types(types ...string) *CountService {
+ s.types = append(s.types, types...)
+func (s *CountService) Query(query Query) *CountService {
+ s.query = query
+func (s *CountService) Pretty(pretty bool) *CountService {
+func (s *CountService) Do() (int64, error) {
+ return 0, err
+ if len(indexPart) > 0 {
+ // Types part
+ typesPart := make([]string, 0)
+ for _, typ := range s.types {
+ typ, err = uritemplates.Expand("{type}", map[string]string{
+ "type": typ,
+ typesPart = append(typesPart, typ)
+ if len(typesPart) > 0 {
+ path += "/" + strings.Join(typesPart, ",")
+ path += "/_count"
+ // Set body if there is a query specified
+ var body interface{}
+ if s.query != nil {
+ query := make(map[string]interface{})
+ query["query"] = s.query.Source()
+ body = query
+ // Return result
+ ret := new(CountResult)
+ if ret != nil {
+ return ret.Count, nil
+ return int64(0), nil
@@ -0,0 +1,83 @@
+import "testing"
+func TestCount(t *testing.T) {
+ // Count documents
+ count, err := client.Count(testIndexName).Do()
+ if count != 3 {
+ t.Errorf("expected Count = %d; got %d", 3, count)
+ count, err = client.Count(testIndexName).Type("tweet").Do()
+ count, err = client.Count(testIndexName).Type("gezwitscher").Do()
+ if count != 0 {
+ t.Errorf("expected Count = %d; got %d", 0, count)
+ // Count with query
+ query := NewTermQuery("user", "olivere")
+ count, err = client.Count(testIndexName).Query(query).Do()
+ if count != 2 {
+ t.Errorf("expected Count = %d; got %d", 2, count)
+ // Count with query and type
+ query = NewTermQuery("user", "olivere")
+ count, err = client.Count(testIndexName).Type("tweet").Query(query).Do()
@@ -0,0 +1,75 @@
+type CreateIndexService struct {
+ body string
+func NewCreateIndexService(client *Client) *CreateIndexService {
+ builder := &CreateIndexService{
+func (b *CreateIndexService) Index(index string) *CreateIndexService {
+ b.index = index
+ return b
+func (b *CreateIndexService) Body(body string) *CreateIndexService {
+ b.body = body
+func (b *CreateIndexService) Pretty(pretty bool) *CreateIndexService {
+ b.pretty = pretty
+func (b *CreateIndexService) Do() (*CreateIndexResult, error) {
+ path, err := uritemplates.Expand("/{index}/", map[string]string{
+ "index": b.index,
+ if b.pretty {
+ params.Set("pretty", fmt.Sprintf("%v", b.pretty))
+ res, err := b.client.PerformRequest("PUT", path, params, b.body)
+ ret := new(CreateIndexResult)
+// -- Result of a create index request.
+type CreateIndexResult struct {
@@ -0,0 +1,26 @@
+// Decoder is used to decode responses from Elasticsearch.
+// Users of elastic can implement their own decoder for advanced purposes
+// and set them per Client (see SetDecoder). If none is specified,
+// DefaultDecoder is used.
+type Decoder interface {
+ Decode(data []byte, v interface{}) error
+// DefaultDecoder uses json.Unmarshal from the Go standard library
+// to decode JSON data.
+type DefaultDecoder struct{}
+// Decode decodes with json.Unmarshal from the Go standard library.
+func (u *DefaultDecoder) Decode(data []byte, v interface{}) error {
+ return json.Unmarshal(data, v)
@@ -0,0 +1,49 @@
+type decoder struct {
+ dec json.Decoder
+ N int64
+func (d *decoder) Decode(data []byte, v interface{}) error {
+ atomic.AddInt64(&d.N, 1)
+ dec := json.NewDecoder(bytes.NewReader(data))
+ dec.UseNumber()
+ return dec.Decode(v)
+func TestDecoder(t *testing.T) {
+ dec := &decoder{}
+ client := setupTestClientAndCreateIndex(t, SetDecoder(dec), SetMaxRetries(0))
+ tweet := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+ // Add a document
+ indexResult, err := client.Index().
+ Index(testIndexName).
+ BodyJson(&tweet).
+ if indexResult == nil {
+ t.Errorf("expected result to be != nil; got: %v", indexResult)
+ if dec.N <= 0 {
+ t.Errorf("expected at least 1 call of decoder; got: %d", dec.N)
@@ -0,0 +1,118 @@
+type DeleteService struct {
+ version *int
+func NewDeleteService(client *Client) *DeleteService {
+ builder := &DeleteService{
+func (s *DeleteService) Index(index string) *DeleteService {
+func (s *DeleteService) Type(_type string) *DeleteService {
+func (s *DeleteService) Id(id string) *DeleteService {
+ s.id = id
+func (s *DeleteService) Parent(parent string) *DeleteService {
+ if s.routing == "" {
+ s.routing = parent
+func (s *DeleteService) Refresh(refresh bool) *DeleteService {
+func (s *DeleteService) Version(version int) *DeleteService {
+ s.version = &version
+func (s *DeleteService) Pretty(pretty bool) *DeleteService {
+func (s *DeleteService) Do() (*DeleteResult, error) {
+ path, err := uritemplates.Expand("/{index}/{type}/{id}", map[string]string{
+ "id": s.id,
+ if s.version != nil {
+ params.Set("version", fmt.Sprintf("%d", *s.version))
+ if s.routing != "" {
+ params.Set("routing", fmt.Sprintf("%s", s.routing))
+ res, err := s.client.PerformRequest("DELETE", path, params, nil)
+ // Return response
+ ret := new(DeleteResult)
+// -- Result of a delete request.
+type DeleteResult struct {
+ Found bool `json:"found"`
+ Index string `json:"_index"`
+ Type string `json:"_type"`
+ Id string `json:"_id"`
+ Version int64 `json:"_version"`
@@ -0,0 +1,292 @@
+// DeleteByQueryService deletes documents that match a query.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/docs-delete-by-query.html.
+type DeleteByQueryService struct {
+ analyzer string
+ consistency string
+ defaultOper string
+ df string
+ ignoreUnavailable *bool
+ allowNoIndices *bool
+ expandWildcards string
+ replication string
+ q string
+// NewDeleteByQueryService creates a new DeleteByQueryService.
+// You typically use the client's DeleteByQuery to get a reference to
+// the service.
+func NewDeleteByQueryService(client *Client) *DeleteByQueryService {
+ builder := &DeleteByQueryService{
+// Index limits the delete-by-query to a single index.
+// You can use _all to perform the operation on all indices.
+func (s *DeleteByQueryService) Index(index string) *DeleteByQueryService {
+// Indices sets the indices on which to perform the delete operation.
+func (s *DeleteByQueryService) Indices(indices ...string) *DeleteByQueryService {
+// Type limits the delete operation to the given type.
+func (s *DeleteByQueryService) Type(typ string) *DeleteByQueryService {
+// Types limits the delete operation to the given types.
+func (s *DeleteByQueryService) Types(types ...string) *DeleteByQueryService {
+// Analyzer to use for the query string.
+func (s *DeleteByQueryService) Analyzer(analyzer string) *DeleteByQueryService {
+ s.analyzer = analyzer
+// Consistency represents the specific write consistency setting for the operation.
+// It can be one, quorum, or all.
+func (s *DeleteByQueryService) Consistency(consistency string) *DeleteByQueryService {
+ s.consistency = consistency
+// DefaultOperator for query string query (AND or OR).
+func (s *DeleteByQueryService) DefaultOperator(defaultOperator string) *DeleteByQueryService {
+ s.defaultOper = defaultOperator
+// DF is the field to use as default where no field prefix is given in the query string.
+func (s *DeleteByQueryService) DF(defaultField string) *DeleteByQueryService {
+ s.df = defaultField
+// DefaultField is the field to use as default where no field prefix is given in the query string.
+// It is an alias to the DF func.
+func (s *DeleteByQueryService) DefaultField(defaultField string) *DeleteByQueryService {
+// IgnoreUnavailable indicates whether specified concrete indices should be
+// ignored when unavailable (missing or closed).
+func (s *DeleteByQueryService) IgnoreUnavailable(ignore bool) *DeleteByQueryService {
+ s.ignoreUnavailable = &ignore
+// AllowNoIndices indicates whether to ignore if a wildcard indices
+// expression resolves into no concrete indices (including the _all string
+// or when no indices have been specified).
+func (s *DeleteByQueryService) AllowNoIndices(allow bool) *DeleteByQueryService {
+ s.allowNoIndices = &allow
+// ExpandWildcards indicates whether to expand wildcard expression to
+// concrete indices that are open, closed or both. It can be "open" or "closed".
+func (s *DeleteByQueryService) ExpandWildcards(expand string) *DeleteByQueryService {
+ s.expandWildcards = expand
+// Replication sets a specific replication type (sync or async).
+func (s *DeleteByQueryService) Replication(replication string) *DeleteByQueryService {
+ s.replication = replication
+// Q specifies the query in Lucene query string syntax. You can also use
+// Query to programmatically specify the query.
+func (s *DeleteByQueryService) Q(query string) *DeleteByQueryService {
+ s.q = query
+// QueryString is an alias to Q. Notice that you can also use Query to
+// programmatically set the query.
+func (s *DeleteByQueryService) QueryString(query string) *DeleteByQueryService {
+// Routing sets a specific routing value.
+func (s *DeleteByQueryService) Routing(routing string) *DeleteByQueryService {
+ s.routing = routing
+// Timeout sets an explicit operation timeout, e.g. "1s" or "10000ms".
+func (s *DeleteByQueryService) Timeout(timeout string) *DeleteByQueryService {
+// Pretty indents the JSON output from Elasticsearch.
+func (s *DeleteByQueryService) Pretty(pretty bool) *DeleteByQueryService {
+// Query sets the query programmatically.
+func (s *DeleteByQueryService) Query(query Query) *DeleteByQueryService {
+// Do executes the delete-by-query operation.
+func (s *DeleteByQueryService) Do() (*DeleteByQueryResult, error) {
+ path += "/_query"
+ if s.analyzer != "" {
+ params.Set("analyzer", s.analyzer)
+ if s.consistency != "" {
+ params.Set("consistency", s.consistency)
+ if s.defaultOper != "" {
+ params.Set("default_operator", s.defaultOper)
+ if s.df != "" {
+ params.Set("df", s.df)
+ if s.ignoreUnavailable != nil {
+ params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+ if s.allowNoIndices != nil {
+ params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+ if s.expandWildcards != "" {
+ params.Set("expand_wildcards", s.expandWildcards)
+ if s.replication != "" {
+ params.Set("replication", s.replication)
+ params.Set("routing", s.routing)
+ if s.q != "" {
+ params.Set("q", s.q)
+ // Set body if there is a query set
+ ret := new(DeleteByQueryResult)
+// DeleteByQueryResult is the outcome of executing Do with DeleteByQueryService.
+type DeleteByQueryResult struct {
+ Indices map[string]IndexDeleteByQueryResult `json:"_indices"`
+// IndexDeleteByQueryResult is the result of a delete-by-query for a specific
+// index.
+type IndexDeleteByQueryResult struct {
+ Shards shardsInfo `json:"_shards"`
@@ -0,0 +1,76 @@
+func TestDeleteByQuery(t *testing.T) {
+ t.Fatalf("expected count = %d; got: %d", 3, count)
+ // Delete all documents by sandrae
+ q := NewTermQuery("user", "sandrae")
+ res, err := client.DeleteByQuery().Index(testIndexName).Type("tweet").Query(q).Do()
+ t.Fatalf("expected response != nil; got: %v", res)
+ idx, found := res.Indices[testIndexName]
+ t.Errorf("expected Found = true; got: %v", found)
+ if idx.Shards.Failed > 0 {
+ t.Errorf("expected no failed shards; got: %d", idx.Shards.Failed)
+ count, err = client.Count(testIndexName).Do()
+ t.Fatalf("expected Count = %d; got: %d", 2, count)
@@ -0,0 +1,57 @@
+type DeleteIndexService struct {
+func NewDeleteIndexService(client *Client) *DeleteIndexService {
+ builder := &DeleteIndexService{
+func (b *DeleteIndexService) Index(index string) *DeleteIndexService {
+func (b *DeleteIndexService) Do() (*DeleteIndexResult, error) {
+ res, err := b.client.PerformRequest("DELETE", path, nil, nil)
+ ret := new(DeleteIndexResult)
+// -- Result of a delete index request.
+type DeleteIndexResult struct {
@@ -0,0 +1,136 @@
+// DeleteMappingService allows deleting a mapping along with its data.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-delete-mapping.html.
+type DeleteMappingService struct {
+ index []string
+ typ []string
+// NewDeleteMappingService creates a new DeleteMappingService.
+func NewDeleteMappingService(client *Client) *DeleteMappingService {
+ return &DeleteMappingService{
+ index: make([]string, 0),
+ typ: make([]string, 0),
+// Index is a list of index names (supports wildcards). Use `_all` for all indices.
+func (s *DeleteMappingService) Index(index ...string) *DeleteMappingService {
+ s.index = append(s.index, index...)
+// Type is a list of document types to delete (supports wildcards).
+// Use `_all` to delete all document types in the specified indices.
+func (s *DeleteMappingService) Type(typ ...string) *DeleteMappingService {
+ s.typ = append(s.typ, typ...)
+// MasterTimeout specifies the timeout for connecting to master.
+func (s *DeleteMappingService) MasterTimeout(masterTimeout string) *DeleteMappingService {
+// Pretty indicates that the JSON response should be indented and human readable.
+func (s *DeleteMappingService) Pretty(pretty bool) *DeleteMappingService {
+func (s *DeleteMappingService) buildURL() (string, url.Values, error) {
+ path, err := uritemplates.Expand("/{index}/_mapping/{type}", map[string]string{
+ "index": strings.Join(s.index, ","),
+ "type": strings.Join(s.typ, ","),
+ params.Set("pretty", "1")
+func (s *DeleteMappingService) Validate() error {
+ var invalid []string
+ if len(s.index) == 0 {
+ invalid = append(invalid, "Index")
+ if len(s.typ) == 0 {
+ invalid = append(invalid, "Type")
+ if len(invalid) > 0 {
+ return fmt.Errorf("missing required fields: %v", invalid)
+func (s *DeleteMappingService) Do() (*DeleteMappingResponse, error) {
+ ret := new(DeleteMappingResponse)
+// DeleteMappingResponse is the response of DeleteMappingService.Do.
+type DeleteMappingResponse struct {
@@ -0,0 +1,40 @@
+func TestDeleteMappingURL(t *testing.T) {
+ Indices []string
+ Types []string
+ Expected string
+ []string{"twitter"},
+ []string{"tweet"},
+ "/twitter/_mapping/tweet",
+ []string{"store-1", "store-2"},
+ []string{"tweet", "user"},
+ "/store-1%2Cstore-2/_mapping/tweet%2Cuser",
+ path, _, err := client.DeleteMapping().Index(test.Indices...).Type(test.Types...).buildURL()
+ if path != test.Expected {
+ t.Errorf("expected %q; got: %q", test.Expected, path)
+// DeleteTemplateService deletes a search template. More information can
+// be found at http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/search-template.html.
+type DeleteTemplateService struct {
+ versionType string
+// NewDeleteTemplateService creates a new DeleteTemplateService.
+func NewDeleteTemplateService(client *Client) *DeleteTemplateService {
+ return &DeleteTemplateService{
+// Id is the template ID.
+func (s *DeleteTemplateService) Id(id string) *DeleteTemplateService {
+// Version an explicit version number for concurrency control.
+func (s *DeleteTemplateService) Version(version int) *DeleteTemplateService {
+// VersionType specifies a version type.
+func (s *DeleteTemplateService) VersionType(versionType string) *DeleteTemplateService {
+ s.versionType = versionType
+func (s *DeleteTemplateService) buildURL() (string, url.Values, error) {
+ path, err := uritemplates.Expand("/_search/template/{id}", map[string]string{
+ if s.versionType != "" {
+ params.Set("version_type", s.versionType)
+func (s *DeleteTemplateService) Validate() error {
+ if s.id == "" {
+ invalid = append(invalid, "Id")
+func (s *DeleteTemplateService) Do() (*DeleteTemplateResponse, error) {
+ ret := new(DeleteTemplateResponse)
+// DeleteTemplateResponse is the response of DeleteTemplateService.Do.
+type DeleteTemplateResponse struct {
+ Version int `json:"_version"`
+func TestDelete(t *testing.T) {
+ // Delete document 1
+ res, err := client.Delete().Index(testIndexName).Type("tweet").Id("1").Do()
+ if res.Found != true {
+ t.Errorf("expected Found = true; got %v", res.Found)
+ // Delete non existent document 99
+ res, err = client.Delete().Index(testIndexName).Type("tweet").Id("99").Refresh(true).Do()
+ if res.Found != false {
+ t.Errorf("expected Found = false; got %v", res.Found)
@@ -0,0 +1,51 @@
+/*
+Package elastic provides an interface to the Elasticsearch server
+(http://www.elasticsearch.org/).
+The first thing you do is to create a Client. If you have Elasticsearch
+installed and running with its default settings
+(i.e. available at http://127.0.0.1:9200), all you need to do is:
+ client, err := elastic.NewClient()
+If your Elasticsearch server is running on a different IP and/or port,
+just provide a URL to NewClient:
+ // Create a client and connect to http://192.168.2.10:9201
+ client, err := elastic.NewClient(elastic.SetURL("http://192.168.2.10:9201"))
+You can pass many more configuration parameters to NewClient. Review the
+documentation of NewClient for more information.
+If no Elasticsearch server is available, services will fail when creating
+a new request and will return ErrNoClient.
+A Client provides services. The services usually come with a variety of
+methods to prepare the query and a Do function to execute it against the
+Elasticsearch REST interface and return a response. Here is an example
+of the IndexExists service that checks if a given index already exists.
+ exists, err := client.IndexExists("twitter").Do()
+ // Index does not exist yet.
+Look up the documentation for Client to get an idea of the services provided
+and what kinds of responses you get when executing the Do function of a service.
+Also see the wiki on Github for more details.
+*/
+ "io/ioutil"
+func checkResponse(res *http.Response) error {
+ // 200-299 and 404 are valid status codes
+ if (res.StatusCode >= 200 && res.StatusCode <= 299) || res.StatusCode == http.StatusNotFound {
+ if res.Body == nil {
+ return fmt.Errorf("elastic: Error %d (%s)", res.StatusCode, http.StatusText(res.StatusCode))
+ slurp, err := ioutil.ReadAll(res.Body)
+ return fmt.Errorf("elastic: Error %d (%s) when reading body: %v", res.StatusCode, http.StatusText(res.StatusCode), err)
+ errReply := new(Error)
+ err = json.Unmarshal(slurp, errReply)
+ if err == nil && errReply != nil {
+ if errReply.Status == 0 {
+ errReply.Status = res.StatusCode
+ return errReply
+type Error struct {
+ Status int `json:"status"`
+ Message string `json:"error"`
+func (e *Error) Error() string {
+ if e.Message != "" {
+ return fmt.Sprintf("elastic: Error %d (%s): %s", e.Status, http.StatusText(e.Status), e.Message)
+ return fmt.Sprintf("elastic: Error %d (%s)", e.Status, http.StatusText(e.Status))
@@ -0,0 +1,45 @@
+ "bufio"
+func TestResponseError(t *testing.T) {
+ message := "Something went seriously wrong."
+ raw := "HTTP/1.1 500 Internal Server Error\r\n" +
+ "\r\n" +
+ `{"status":500,"error":"` + message + `"}` + "\r\n"
+ r := bufio.NewReader(strings.NewReader(raw))
+ resp, err := http.ReadResponse(r, nil)
+ err = checkResponse(resp)
+ t.Fatalf("expected error; got: %v", err)
+ // Check for correct error message
+ expected := fmt.Sprintf("elastic: Error %d (%s): %s", resp.StatusCode, http.StatusText(resp.StatusCode), message)
+ got := err.Error()
+ t.Fatalf("expected %q; got: %q", expected, got)
+ // Check that error is of type *elastic.Error, which contains additional information
+ e, ok := err.(*Error)
+ if !ok {
+ t.Fatal("expected error to be of type *elastic.Error")
+ if e.Status != resp.StatusCode {
+ t.Fatalf("expected status code %d; got: %d", resp.StatusCode, e.Status)
+ if e.Message != message {
+ t.Fatalf("expected error message %q; got: %q", message, e.Message)
@@ -0,0 +1,547 @@
+package elastic_test
+// NOTE(review): lossy diff sample of example_test.go — request bodies,
+// error checks and closing braces between these lines were dropped in
+// extraction; only the surviving lines are annotated here.
+func Example() {
+	errorlog := log.New(os.Stdout, "APP ", log.LstdFlags)
+	// Obtain a client. You can provide your own HTTP client here.
+	client, err := elastic.NewClient(elastic.SetErrorLog(errorlog))
+	//client.SetTracer(log.New(os.Stdout, "", 0))
+	// Ping the Elasticsearch server to get e.g. the version number
+	info, code, err := client.Ping().Do()
+	fmt.Printf("Elasticsearch returned with code %d and version %s", code, info.Version.Number)
+	// Getting the ES version number is quite common, so there's a shortcut
+	esversion, err := client.ElasticsearchVersion("http://127.0.0.1:9200")
+	fmt.Printf("Elasticsearch version %s", esversion)
+	createIndex, err := client.CreateIndex("twitter").Do()
+	// Not acknowledged
+	put1, err := client.Index().
+	fmt.Printf("Indexed tweet %s to index %s, type %s\n", put1.Id, put1.Index, put1.Type)
+	put2, err := client.Index().
+	fmt.Printf("Indexed tweet %s to index %s, type %s\n", put2.Id, put2.Index, put2.Type)
+	get1, err := client.Get().
+	if get1.Found {
+		fmt.Printf("Got document %s in version %d from index %s, type %s\n", get1.Id, get1.Version, get1.Index, get1.Type)
+	_, err = client.Flush().Index("twitter").Do()
+	searchResult, err := client.Search().
+	fmt.Printf("Query took %d milliseconds\n", searchResult.TookInMillis)
+	// Each is a convenience function that iterates over hits in a search result.
+	// It makes sure you don't need to check for nil values in the response.
+	// However, it ignores errors in serialization. If you want full control
+	// over iterating the hits, see below.
+	var ttyp Tweet
+	for _, item := range searchResult.Each(reflect.TypeOf(ttyp)) {
+	// TotalHits is another convenience function that works even when something goes wrong.
+	fmt.Printf("Found a total of %d tweets\n", searchResult.TotalHits())
+	// Here's how you iterate through results with full control over each step.
+	// Update a tweet by the update API of Elasticsearch.
+	// We just increment the number of retweets.
+	update, err := client.Update().Index("twitter").Type("tweet").Id("1").
+		Script("ctx._source.retweets += num").
+		ScriptParams(map[string]interface{}{"num": 1}).
+		Upsert(map[string]interface{}{"retweets": 0}).
+	fmt.Printf("New version of tweet %q is now %d", update.Id, update.Version)
+	// Delete an index.
+	deleteIndex, err := client.DeleteIndex("twitter").Do()
+func ExampleClient_NewClient_default() {
+	// Obtain a client to the Elasticsearch instance on http://localhost:9200.
+	fmt.Printf("connection failed: %v\n", err)
+	fmt.Println("connected")
+	_ = client
+	// Output:
+	// connected
+func ExampleClient_NewClient_cluster() {
+	// Obtain a client for an Elasticsearch cluster of two nodes,
+	// running on 10.0.1.1 and 10.0.1.2.
+	client, err := elastic.NewClient(elastic.SetURL("http://10.0.1.1:9200", "http://10.0.1.2:9200"))
+func ExampleClient_NewClient_manyOptions() {
+	// running on 10.0.1.1 and 10.0.1.2. Do not run the sniffer.
+	// Set the healthcheck interval to 10s. When requests fail,
+	// retry 5 times. Print error messages to os.Stderr and informational
+	// messages to os.Stdout.
+		elastic.SetURL("http://10.0.1.1:9200", "http://10.0.1.2:9200"),
+		elastic.SetSniff(false),
+		elastic.SetHealthcheckInterval(10*time.Second),
+		elastic.SetMaxRetries(5),
+		elastic.SetErrorLog(log.New(os.Stderr, "ELASTIC ", log.LstdFlags)),
+		elastic.SetInfoLog(log.New(os.Stdout, "", log.LstdFlags)))
+func ExampleIndexExistsService() {
+	// Get a client to the local Elasticsearch instance.
+	// Use the IndexExists service to check if the index "twitter" exists.
+func ExampleCreateIndexService() {
+func ExampleDeleteIndexService() {
+func ExampleSearchService() {
+func ExampleAggregations() {
+	// Create an aggregation for users and a sub-aggregation for a date histogram of tweets (per year).
+	timeline := elastic.NewTermsAggregation().Field("user").Size(10).OrderByCountDesc()
+	histogram := elastic.NewDateHistogramAggregation().Field("created").Interval("year")
+	timeline = timeline.SubAggregation("history", histogram)
+		Query(elastic.NewMatchAllQuery()). // return all results, but ...
+		SearchType("count"). // ... do not return hits, just the count
+		Aggregation("timeline", timeline). // add our aggregation to the query
+	// Access "timeline" aggregate in search result.
+	agg, found := searchResult.Aggregations.Terms("timeline")
+	// NOTE(review): "sould" typo in the message below — fix upstream.
+	log.Fatalf("we sould have a terms aggregation called %q", "timeline")
+	for _, userBucket := range agg.Buckets {
+		// Every bucket should have the user field as key.
+		user := userBucket.Key
+		// The sub-aggregation history should have the number of tweets per year.
+		histogram, found := userBucket.DateHistogram("history")
+		if found {
+			for _, year := range histogram.Buckets {
+				fmt.Printf("user %q has %d tweets in %q\n", user, year.DocCount, year.KeyAsString)
+func ExampleSearchResult() {
+	// Do a search
+	searchResult, err := client.Search().Index("twitter").Query(elastic.NewMatchAllQuery()).Do()
+	// Each is a utility function that iterates over hits in a search result.
+	// Here's how you iterate hits with full control.
+func ExamplePutTemplateService() {
+	// Create search template
+	tmpl := `{"template":{"query":{"match":{"title":"{{query_string}}"}}}}`
+	// Create template
+	resp, err := client.PutTemplate().
+		Id("my-search-template"). // Name of the template
+		BodyString(tmpl). // Search template itself
+		Do() // Execute
+	if resp.Created {
+		fmt.Println("search template created")
+func ExampleGetTemplateService() {
+	// Get template stored under "my-search-template"
+	resp, err := client.GetTemplate().Id("my-search-template").Do()
+	fmt.Printf("search template is: %q\n", resp.Template)
+func ExampleDeleteTemplateService() {
+	// Delete template
+	resp, err := client.DeleteTemplate().Id("my-search-template").Do()
+	if resp != nil && resp.Found {
+		fmt.Println("template deleted")
+func ExampleClusterHealthService() {
+	res, err := client.ClusterHealth().Index("twitter").Do()
+	fmt.Printf("Cluster status is %q\n", res.Status)
+func ExampleClusterHealthService_WaitForGreen() {
+	// Wait for status green
+	res, err := client.ClusterHealth().WaitForStatus("green").Timeout("15s").Do()
+	if res.TimedOut {
+		fmt.Printf("time out waiting for cluster status %q\n", "green")
+	fmt.Printf("cluster status is %q\n", res.Status)
+func ExampleClusterStateService() {
+	res, err := client.ClusterState().Metric("version").Do()
+	fmt.Printf("Cluster %q has version %d", res.ClusterName, res.Version)
@@ -0,0 +1,71 @@
+// NOTE(review): lossy diff sample of exists.go — struct fields, setter
+// bodies and the URL-building code between these lines were dropped in
+// extraction; only the surviving lines are annotated here.
+type ExistsService struct {
+// NewExistsService creates a new ExistsService — presumably wired to
+// the given client; TODO confirm against the full file.
+func NewExistsService(client *Client) *ExistsService {
+	builder := &ExistsService{
+// String returns a short, human-readable description of the service
+// state for debugging/logging.
+func (s *ExistsService) String() string {
+	return fmt.Sprintf("exists([%v][%v][%v])",
+		s.index,
+		s._type,
+		s.id)
+func (s *ExistsService) Index(index string) *ExistsService {
+func (s *ExistsService) Type(_type string) *ExistsService {
+func (s *ExistsService) Id(id string) *ExistsService {
+// Do issues a HEAD request: HTTP 200 means the document exists,
+// 404 means it does not; any other code is reported as an error.
+func (s *ExistsService) Do() (bool, error) {
+	return false, err
+	res, err := s.client.PerformRequest("HEAD", path, nil, nil)
+	if res.StatusCode == 200 {
+		return true, nil
+	} else if res.StatusCode == 404 {
+		return false, nil
+	return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode)
@@ -0,0 +1,329 @@
+// NOTE(review): lossy diff sample of explain.go — field declarations,
+// return statements and closing braces between these lines were dropped
+// in extraction; only the surviving lines are annotated here.
+// ExplainService computes a score explanation for a query and
+// a specific document.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-explain.html.
+type ExplainService struct {
+	lenient *bool
+	fields []string
+	lowercaseExpandedTerms *bool
+	xSourceInclude []string
+	analyzeWildcard *bool
+	preference string
+	xSource []string
+	defaultOperator string
+	xSourceExclude []string
+	source string
+// NewExplainService creates a new ExplainService.
+func NewExplainService(client *Client) *ExplainService {
+	return &ExplainService{
+		xSource: make([]string, 0),
+		xSourceExclude: make([]string, 0),
+		fields: make([]string, 0),
+		xSourceInclude: make([]string, 0),
+// Id is the document ID.
+func (s *ExplainService) Id(id string) *ExplainService {
+// Index is the name of the index.
+func (s *ExplainService) Index(index string) *ExplainService {
+// Type is the type of the document.
+func (s *ExplainService) Type(typ string) *ExplainService {
+	s.typ = typ
+// Source is the URL-encoded query definition (instead of using the request body).
+func (s *ExplainService) Source(source string) *ExplainService {
+	s.source = source
+// XSourceExclude is a list of fields to exclude from the returned _source field.
+func (s *ExplainService) XSourceExclude(xSourceExclude ...string) *ExplainService {
+	s.xSourceExclude = make([]string, 0)
+	s.xSourceExclude = append(s.xSourceExclude, xSourceExclude...)
+// Lenient specifies whether format-based query failures
+// (such as providing text to a numeric field) should be ignored.
+func (s *ExplainService) Lenient(lenient bool) *ExplainService {
+	s.lenient = &lenient
+// Query in the Lucene query string syntax.
+func (s *ExplainService) Q(q string) *ExplainService {
+	s.q = q
+// Routing sets a specific routing value.
+func (s *ExplainService) Routing(routing string) *ExplainService {
+// AnalyzeWildcard specifies whether wildcards and prefix queries
+// in the query string query should be analyzed (default: false).
+func (s *ExplainService) AnalyzeWildcard(analyzeWildcard bool) *ExplainService {
+	s.analyzeWildcard = &analyzeWildcard
+// Analyzer is the analyzer for the query string query.
+func (s *ExplainService) Analyzer(analyzer string) *ExplainService {
+// Df is the default field for query string query (default: _all).
+func (s *ExplainService) Df(df string) *ExplainService {
+	s.df = df
+// Fields is a list of fields to return in the response.
+func (s *ExplainService) Fields(fields ...string) *ExplainService {
+	s.fields = make([]string, 0)
+	s.fields = append(s.fields, fields...)
+// LowercaseExpandedTerms specifies whether query terms should be lowercased.
+func (s *ExplainService) LowercaseExpandedTerms(lowercaseExpandedTerms bool) *ExplainService {
+	s.lowercaseExpandedTerms = &lowercaseExpandedTerms
+// XSourceInclude is a list of fields to extract and return from the _source field.
+func (s *ExplainService) XSourceInclude(xSourceInclude ...string) *ExplainService {
+	s.xSourceInclude = make([]string, 0)
+	s.xSourceInclude = append(s.xSourceInclude, xSourceInclude...)
+// DefaultOperator is the default operator for query string query (AND or OR).
+func (s *ExplainService) DefaultOperator(defaultOperator string) *ExplainService {
+	s.defaultOperator = defaultOperator
+// Parent is the ID of the parent document.
+func (s *ExplainService) Parent(parent string) *ExplainService {
+	s.parent = parent
+// Preference specifies the node or shard the operation should be performed on (default: random).
+func (s *ExplainService) Preference(preference string) *ExplainService {
+	s.preference = preference
+// XSource is true or false to return the _source field or not, or a list of fields to return.
+func (s *ExplainService) XSource(xSource ...string) *ExplainService {
+	s.xSource = make([]string, 0)
+	s.xSource = append(s.xSource, xSource...)
+// Pretty asks Elasticsearch to indent the returned JSON.
+func (s *ExplainService) Pretty(pretty bool) *ExplainService {
+// Query sets a query definition using the Query DSL.
+func (s *ExplainService) Query(query Query) *ExplainService {
+	body["query"] = query.Source()
+	s.bodyJson = body
+// BodyJson sets the query definition using the Query DSL.
+func (s *ExplainService) BodyJson(body interface{}) *ExplainService {
+// BodyString sets the query definition using the Query DSL as a string.
+func (s *ExplainService) BodyString(body string) *ExplainService {
+	s.bodyString = body
+// buildURL expands the URI template and collects query-string
+// parameters from the optional fields set above.
+func (s *ExplainService) buildURL() (string, url.Values, error) {
+	path, err := uritemplates.Expand("/{index}/{type}/{id}/_explain", map[string]string{
+		"type": s.typ,
+	if len(s.xSource) > 0 {
+		params.Set("_source", strings.Join(s.xSource, ","))
+	if s.defaultOperator != "" {
+		params.Set("default_operator", s.defaultOperator)
+	if s.parent != "" {
+		params.Set("parent", s.parent)
+	if s.preference != "" {
+		params.Set("preference", s.preference)
+	if s.source != "" {
+		params.Set("source", s.source)
+	if len(s.xSourceExclude) > 0 {
+		params.Set("_source_exclude", strings.Join(s.xSourceExclude, ","))
+	if s.lenient != nil {
+		params.Set("lenient", fmt.Sprintf("%v", *s.lenient))
+	if len(s.fields) > 0 {
+		params.Set("fields", strings.Join(s.fields, ","))
+	if s.lowercaseExpandedTerms != nil {
+		params.Set("lowercase_expanded_terms", fmt.Sprintf("%v", *s.lowercaseExpandedTerms))
+	if len(s.xSourceInclude) > 0 {
+		params.Set("_source_include", strings.Join(s.xSourceInclude, ","))
+	if s.analyzeWildcard != nil {
+		params.Set("analyze_wildcard", fmt.Sprintf("%v", *s.analyzeWildcard))
+// Validate checks that required fields (index, type, id) are set.
+func (s *ExplainService) Validate() error {
+	if s.index == "" {
+	if s.typ == "" {
+// Do executes the explain request and decodes the response.
+func (s *ExplainService) Do() (*ExplainResponse, error) {
+	if s.bodyJson != nil {
+		body = s.bodyJson
+	body = s.bodyString
+	res, err := s.client.PerformRequest("GET", path, params, body)
+	ret := new(ExplainResponse)
+// ExplainResponse is the response of ExplainService.Do.
+type ExplainResponse struct {
+	Matched bool `json:"matched"`
+	Explanation map[string]interface{} `json:"explanation"`
+// NOTE(review): lossy sample of explain_test.go — setup and error checks
+// between these lines were dropped.
+func TestExplain(t *testing.T) {
+	BodyJson(&tweet1).
+	Refresh(true).
+	// Explain
+	expl, err := client.Explain(testIndexName, "tweet", "1").Query(query).Do()
+	if expl == nil {
+		t.Fatal("expected to return an explanation")
+	if !expl.Matched {
+		t.Errorf("expected matched to be %v; got: %v", true, expl.Matched)
// FetchSourceContext controls if and how the _source field of a document
// is returned: fetching can be disabled entirely, or filtered down to a
// set of included/excluded fields.
type FetchSourceContext struct {
	fetchSource     bool
	transformSource bool
	includes        []string
	excludes        []string
}

// NewFetchSourceContext returns a new context. Pass false to suppress
// the _source field in responses entirely.
func NewFetchSourceContext(fetchSource bool) *FetchSourceContext {
	return &FetchSourceContext{
		fetchSource: fetchSource,
		includes:    make([]string, 0),
		excludes:    make([]string, 0),
	}
}

// FetchSource reports whether the _source field should be returned.
func (fsc *FetchSourceContext) FetchSource() bool {
	return fsc.fetchSource
}

// SetFetchSource sets whether the _source field should be returned.
func (fsc *FetchSourceContext) SetFetchSource(fetchSource bool) {
	fsc.fetchSource = fetchSource
}

// Include adds fields to extract and return from the _source field.
func (fsc *FetchSourceContext) Include(includes ...string) *FetchSourceContext {
	fsc.includes = append(fsc.includes, includes...)
	return fsc
}

// Exclude adds fields to omit from the returned _source field.
func (fsc *FetchSourceContext) Exclude(excludes ...string) *FetchSourceContext {
	fsc.excludes = append(fsc.excludes, excludes...)
	return fsc
}

// TransformSource sets whether the source should be transformed before
// returning (see the Elasticsearch mapping transform feature).
func (fsc *FetchSourceContext) TransformSource(transformSource bool) *FetchSourceContext {
	fsc.transformSource = transformSource
	return fsc
}

// Source returns the JSON-serializable request body fragment:
// false when fetching is disabled, otherwise the includes/excludes map.
func (fsc *FetchSourceContext) Source() interface{} {
	if !fsc.fetchSource {
		return false
	}
	return map[string]interface{}{
		"includes": fsc.includes,
		"excludes": fsc.excludes,
	}
}

// Query returns the parameters in a form suitable for a URL query string.
// When fetching is disabled only "_source=false" is emitted; includes and
// excludes are deliberately ignored in that case.
func (fsc *FetchSourceContext) Query() url.Values {
	params := url.Values{}
	if !fsc.fetchSource {
		params.Add("_source", "false")
		return params
	}
	if len(fsc.includes) > 0 {
		params.Add("_source_include", strings.Join(fsc.includes, ","))
	}
	if len(fsc.excludes) > 0 {
		params.Add("_source_exclude", strings.Join(fsc.excludes, ","))
	}
	return params
}
+// NOTE(review): lossy diff sample of fetch_source_context_test.go — the
+// error/equality guards around t.Fatalf/t.Errorf were dropped in
+// extraction, as were the marshal/compare lines of the later tests.
+func TestFetchSourceContextNoFetchSource(t *testing.T) {
+	builder := NewFetchSourceContext(false)
+	data, err := json.Marshal(builder.Source())
+	t.Fatalf("marshaling to JSON failed: %v", err)
+	got := string(data)
+	expected := `false`
+	t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+func TestFetchSourceContextNoFetchSourceIgnoreIncludesAndExcludes(t *testing.T) {
+	builder := NewFetchSourceContext(false).Include("a", "b").Exclude("c")
+func TestFetchSourceContextFetchSource(t *testing.T) {
+	builder := NewFetchSourceContext(true)
+	expected := `{"excludes":[],"includes":[]}`
+func TestFetchSourceContextFetchSourceWithIncludesAndExcludes(t *testing.T) {
+	builder := NewFetchSourceContext(true).Include("a", "b").Exclude("c")
+	expected := `{"excludes":["c"],"includes":["a","b"]}`
+func TestFetchSourceContextQueryDefaults(t *testing.T) {
+	values := builder.Query()
+	got := values.Encode()
+	expected := ""
+	t.Errorf("expected %q; got: %q", expected, got)
+func TestFetchSourceContextQueryNoFetchSource(t *testing.T) {
+	expected := "_source=false"
+func TestFetchSourceContextQueryFetchSourceWithIncludesAndExcludes(t *testing.T) {
+	expected := "_source_exclude=c&_source_include=a%2Cb"
@@ -0,0 +1,9 @@
// Filter represents any filter in the Elasticsearch Query DSL.
// Implementations return, via Source, the JSON-serializable fragment
// that is embedded into the request body.
type Filter interface {
	Source() interface{}
}
@@ -0,0 +1,167 @@
+// NOTE(review): lossy diff sample of flush.go — struct fields, returns
+// and closing braces between these lines were dropped in extraction.
+// Flush allows to flush one or more indices. The flush process of an index
+// basically frees memory from the index by flushing data to the index
+// storage and clearing the internal transaction log.
+// See http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-flush.html
+// for details.
+type FlushService struct {
+	force *bool
+	full *bool
+	waitIfOngoing *bool
+func NewFlushService(client *Client) *FlushService {
+	builder := &FlushService{
+func (s *FlushService) Index(index string) *FlushService {
+func (s *FlushService) Indices(indices ...string) *FlushService {
+// Force specifies whether to force a flush even if it is not necessary.
+func (s *FlushService) Force(force bool) *FlushService {
+	s.force = &force
+// Full, when set to true, creates a new index writer for the index and
+// refreshes all settings related to the index.
+func (s *FlushService) Full(full bool) *FlushService {
+	s.full = &full
+// WaitIfOngoing will block until the flush can be executed (if set to true)
+// if another flush operation is already executing. The default is false
+// and will cause an exception to be thrown on the shard level if another
+// flush operation is already running. [1.4.0.Beta1]
+func (s *FlushService) WaitIfOngoing(wait bool) *FlushService {
+	s.waitIfOngoing = &wait
+// IgnoreUnavailable specifies whether concrete indices should be ignored
+// when unavailable (e.g. missing or closed).
+func (s *FlushService) IgnoreUnavailable(ignoreUnavailable bool) *FlushService {
+	s.ignoreUnavailable = &ignoreUnavailable
+// AllowNoIndices specifies whether to ignore if a wildcard expression
+// yields no indices. This includes the _all index or when no indices
+// have been specified.
+func (s *FlushService) AllowNoIndices(allowNoIndices bool) *FlushService {
+	s.allowNoIndices = &allowNoIndices
+// ExpandWildcards specifies whether to expand wildcards to concrete indices
+// that are open, closed, or both. Use one of "open", "closed", "none", or "all".
+func (s *FlushService) ExpandWildcards(expandWildcards string) *FlushService {
+	s.expandWildcards = expandWildcards
+// Do executes the service.
+func (s *FlushService) Do() (*FlushResult, error) {
+	if len(s.indices) > 0 {
+		path += strings.Join(indexPart, ",") + "/"
+	path += "_flush"
+	if s.force != nil {
+		params.Set("force", fmt.Sprintf("%v", *s.force))
+	if s.full != nil {
+		params.Set("full", fmt.Sprintf("%v", *s.full))
+	if s.waitIfOngoing != nil {
+		params.Set("wait_if_ongoing", fmt.Sprintf("%v", *s.waitIfOngoing))
+	res, err := s.client.PerformRequest("POST", path, params, nil)
+	ret := new(FlushResult)
+// -- Result of a flush request.
+// shardsInfo summarizes per-shard success/failure counts in responses.
+type shardsInfo struct {
+	Total int `json:"total"`
+	Successful int `json:"successful"`
+	Failed int `json:"failed"`
+type FlushResult struct {
@@ -0,0 +1,22 @@
+// NOTE(review): lossy sample of flush_test.go — setup and the error/nil
+// guards around the surviving lines were dropped in extraction.
+func TestFlush(t *testing.T) {
+	// Flush all indices
+	res, err := client.Flush().Do()
+	t.Errorf("expected res to be != nil; got: %v", res)
@@ -0,0 +1,47 @@
+ "strconv"
// GeoPoint is a geographic position described via latitude and longitude.
type GeoPoint struct {
	Lat, Lon float64
}

// Source returns the object to be serialized in Elasticsearch DSL.
func (pt *GeoPoint) Source() map[string]float64 {
	return map[string]float64{
		"lat": pt.Lat,
		"lon": pt.Lon,
	}
}

// GeoPointFromLatLon initializes a new GeoPoint by latitude and longitude.
func GeoPointFromLatLon(lat, lon float64) *GeoPoint {
	return &GeoPoint{Lat: lat, Lon: lon}
}

// GeoPointFromString initializes a new GeoPoint by a string that is
// formatted as "{latitude},{longitude}", e.g. "40.10210,-70.12091".
// It returns an error if the string is malformed or either component
// is not a valid float.
func GeoPointFromString(latLon string) (*GeoPoint, error) {
	latlon := strings.SplitN(latLon, ",", 2)
	if len(latlon) != 2 {
		return nil, fmt.Errorf("elastic: %s is not a valid geo point string", latLon)
	}
	lat, err := strconv.ParseFloat(latlon[0], 64)
	if err != nil {
		return nil, fmt.Errorf("elastic: %s is not a valid geo point string", latLon)
	}
	lon, err := strconv.ParseFloat(latlon[1], 64)
	if err != nil {
		return nil, fmt.Errorf("elastic: %s is not a valid geo point string", latLon)
	}
	return &GeoPoint{Lat: lat, Lon: lon}, nil
}
@@ -0,0 +1,24 @@
+// NOTE(review): lossy sample of geo_point_test.go — the marshal error
+// check and the comparison against `expected` were dropped in extraction.
+func TestGeoPointSource(t *testing.T) {
+	pt := GeoPoint{Lat: 40, Lon: -70}
+	data, err := json.Marshal(pt.Source())
+	expected := `{"lat":40,"lon":-70}`
@@ -0,0 +1,223 @@
+// NOTE(review): lossy diff sample of get.go — struct fields, returns and
+// closing braces between these lines were dropped in extraction. Receiver
+// name flips between b and s across methods — unify upstream.
+type GetService struct {
+	realtime *bool
+	fsc *FetchSourceContext
+	version *int64
+	ignoreErrorsOnGeneratedFields *bool
+// NewGetService creates a new GetService; type defaults to "_all".
+func NewGetService(client *Client) *GetService {
+	builder := &GetService{
+		typ: "_all",
+func (b *GetService) String() string {
+	return fmt.Sprintf("[%v][%v][%v]: routing [%v]",
+		b.index,
+		b.typ,
+		b.id,
+		b.routing)
+func (b *GetService) Index(index string) *GetService {
+func (b *GetService) Type(typ string) *GetService {
+	b.typ = typ
+func (b *GetService) Id(id string) *GetService {
+	b.id = id
+// Parent routes via the parent document; note it only sets routing when
+// no explicit routing was given.
+func (b *GetService) Parent(parent string) *GetService {
+	if b.routing == "" {
+		b.routing = parent
+func (b *GetService) Routing(routing string) *GetService {
+	b.routing = routing
+func (b *GetService) Preference(preference string) *GetService {
+	b.preference = preference
+func (b *GetService) Fields(fields ...string) *GetService {
+	if b.fields == nil {
+		b.fields = make([]string, 0)
+	b.fields = append(b.fields, fields...)
+// FetchSource lazily creates the FetchSourceContext on first use.
+func (s *GetService) FetchSource(fetchSource bool) *GetService {
+	if s.fsc == nil {
+		s.fsc = NewFetchSourceContext(fetchSource)
+	s.fsc.SetFetchSource(fetchSource)
+func (s *GetService) FetchSourceContext(fetchSourceContext *FetchSourceContext) *GetService {
+	s.fsc = fetchSourceContext
+func (b *GetService) Refresh(refresh bool) *GetService {
+	b.refresh = &refresh
+func (b *GetService) Realtime(realtime bool) *GetService {
+	b.realtime = &realtime
+func (b *GetService) VersionType(versionType string) *GetService {
+	b.versionType = versionType
+func (b *GetService) Version(version int64) *GetService {
+	b.version = &version
+func (b *GetService) IgnoreErrorsOnGeneratedFields(ignore bool) *GetService {
+	b.ignoreErrorsOnGeneratedFields = &ignore
+func (s *GetService) Validate() error {
+// Do executes the get request: validates, builds the URL with optional
+// query parameters, performs a GET and decodes the result.
+func (b *GetService) Do() (*GetResult, error) {
+	if err := b.Validate(); err != nil {
+		"type": b.typ,
+		"id": b.id,
+	if b.realtime != nil {
+		params.Add("realtime", fmt.Sprintf("%v", *b.realtime))
+	if len(b.fields) > 0 {
+		params.Add("fields", strings.Join(b.fields, ","))
+	if b.routing != "" {
+		params.Add("routing", b.routing)
+	if b.preference != "" {
+		params.Add("preference", b.preference)
+	if b.refresh != nil {
+		params.Add("refresh", fmt.Sprintf("%v", *b.refresh))
+	if b.ignoreErrorsOnGeneratedFields != nil {
+		params.Add("ignore_errors_on_generated_fields", fmt.Sprintf("%v", *b.ignoreErrorsOnGeneratedFields))
+	// NOTE(review): "_fields" here duplicates the "fields" parameter
+	// added above — looks suspicious; verify against the full file.
+	params.Add("_fields", strings.Join(b.fields, ","))
+	if b.version != nil {
+		params.Add("version", fmt.Sprintf("%d", *b.version))
+	if b.versionType != "" {
+		params.Add("version_type", b.versionType)
+	if b.fsc != nil {
+		for k, values := range b.fsc.Query() {
+			params.Add(k, strings.Join(values, ","))
+	res, err := b.client.PerformRequest("GET", path, params, nil)
+	ret := new(GetResult)
+// -- Result of a get request.
+type GetResult struct {
+	Version int64 `json:"_version,omitempty"`
+	Source *json.RawMessage `json:"_source,omitempty"`
+	Fields []string `json:"fields,omitempty"`
+	Error string `json:"error,omitempty"` // used only in MultiGet
@@ -0,0 +1,172 @@
+// NOTE(review): lossy diff sample of get_mapping.go — setter bodies,
+// returns and closing braces between these lines were dropped.
+// GetMappingService retrieves the mapping definitions for an index or
+// index/type. See at http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-get-mapping.html.
+type GetMappingService struct {
+// NewGetMappingService creates a new GetMappingService.
+func NewGetMappingService(client *Client) *GetMappingService {
+	return &GetMappingService{
+// Index is a list of index names.
+func (s *GetMappingService) Index(index ...string) *GetMappingService {
+// Type is a list of document types.
+func (s *GetMappingService) Type(typ ...string) *GetMappingService {
+// AllowNoIndices indicates whether to ignore if a wildcard
+// expression resolves into no concrete indices.
+// This includes `_all` string or when no indices have been specified.
+func (s *GetMappingService) AllowNoIndices(allowNoIndices bool) *GetMappingService {
+// ExpandWildcards indicates whether to expand wildcard expressions to
+// concrete indices that are open, closed or both..
+func (s *GetMappingService) ExpandWildcards(expandWildcards string) *GetMappingService {
+// Local indicates whether to return local information, do not retrieve
+// the state from master node (default: false).
+func (s *GetMappingService) Local(local bool) *GetMappingService {
+func (s *GetMappingService) IgnoreUnavailable(ignoreUnavailable bool) *GetMappingService {
+func (s *GetMappingService) Pretty(pretty bool) *GetMappingService {
+// buildURL defaults both index and type to "_all" when unset.
+func (s *GetMappingService) buildURL() (string, url.Values, error) {
+	var index, typ []string
+	if len(s.index) > 0 {
+		index = s.index
+	index = []string{"_all"}
+	if len(s.typ) > 0 {
+		typ = s.typ
+	typ = []string{"_all"}
+		"index": strings.Join(index, ","),
+		"type": strings.Join(typ, ","),
+func (s *GetMappingService) Validate() error {
+// Do executes the operation. When successful, it returns a json.RawMessage.
+// If you specify an index, Elasticsearch returns HTTP status 404.
+// if you specify a type that does not exist, Elasticsearch returns
+// an empty map.
+func (s *GetMappingService) Do() (map[string]interface{}, error) {
+	var ret map[string]interface{}
+	if err := json.Unmarshal(res.Body, &ret); err != nil {
@@ -0,0 +1,50 @@
+// NOTE(review): lossy sample — the table-driven test struct definition
+// and the loop surrounding these lines were dropped in extraction.
+func TestGetMappingURL(t *testing.T) {
+		[]string{},
+		"/_all/_mapping/_all",
+		"/_all/_mapping/tweet",
+	path, _, err := client.GetMapping().Index(test.Indices...).Type(test.Types...).buildURL()
@@ -0,0 +1,113 @@
+// NOTE(review): lossy diff sample of get_template.go — setter bodies,
+// returns and closing braces between these lines were dropped.
+// GetTemplateService reads a search template.
+// It is documented at http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/search-template.html.
+type GetTemplateService struct {
+	version interface{}
+// NewGetTemplateService creates a new GetTemplateService.
+func NewGetTemplateService(client *Client) *GetTemplateService {
+	return &GetTemplateService{
+// Id sets the ID of the template to read.
+func (s *GetTemplateService) Id(id string) *GetTemplateService {
+// Version is an explicit version number for concurrency control.
+func (s *GetTemplateService) Version(version interface{}) *GetTemplateService {
+	s.version = version
+// VersionType is a specific version type.
+func (s *GetTemplateService) VersionType(versionType string) *GetTemplateService {
+func (s *GetTemplateService) buildURL() (string, url.Values, error) {
+	params.Set("version", fmt.Sprintf("%v", s.version))
+func (s *GetTemplateService) Validate() error {
+// Do executes the operation and returns the template.
+func (s *GetTemplateService) Do() (*GetTemplateResponse, error) {
+	ret := new(GetTemplateResponse)
+// GetTemplateResponse is the response returned by GetTemplateService.Do.
+type GetTemplateResponse struct {
+	Template string `json:"template"`
+// NOTE(review): lossy sample of get_template_test.go — client setup,
+// error checks and closing braces between these lines were dropped.
+func TestGetPutDeleteTemplate(t *testing.T) {
+	// This is a search template, not an index template!
+	tmpl := `{
+	"template": {
+		"query" : { "term" : { "{{my_field}}" : "{{my_value}}" } },
+		"size" : "{{my_size}}"
+	"params":{
+		"my_field" : "user",
+		"my_value" : "olivere",
+		"my_size" : 5
+	putres, err := client.PutTemplate().Id("elastic-template").BodyString(tmpl).Do()
+	if putres == nil {
+		t.Fatalf("expected response; got: %v", putres)
+	if !putres.Created {
+		t.Fatalf("expected template to be created; got: %v", putres.Created)
+	// Always delete template
+	defer client.DeleteTemplate().Id("elastic-template").Do()
+	// Get template
+	getres, err := client.GetTemplate().Id("elastic-template").Do()
+	if getres == nil {
+		t.Fatalf("expected response; got: %v", getres)
+	if getres.Template == "" {
+		t.Errorf("expected template %q; got: %q", tmpl, getres.Template)
@@ -0,0 +1,165 @@
+// NOTE(review): lossy diff sample of get_test.go — index setup, error
+// checks and closing braces between these lines were dropped.
+func TestGet(t *testing.T) {
+	// Get document 1
+	res, err := client.Get().Index(testIndexName).Type("tweet").Id("1").Do()
+	if res.Source == nil {
+		t.Errorf("expected Source != nil; got %v", res.Source)
+	// Get non existent document 99
+	res, err = client.Get().Index(testIndexName).Type("tweet").Id("99").Do()
+	if res.Source != nil {
+		t.Errorf("expected Source == nil; got %v", res.Source)
+func TestGetWithSourceFiltering(t *testing.T) {
+	// Get document 1, without source
+	res, err := client.Get().Index(testIndexName).Type("tweet").Id("1").FetchSource(false).Do()
+	// Get document 1, exclude Message field
+	fsc := NewFetchSourceContext(true).Exclude("message")
+	res, err = client.Get().Index(testIndexName).Type("tweet").Id("1").FetchSourceContext(fsc).Do()
+	var tw tweet
+	err = json.Unmarshal(*res.Source, &tw)
+	if tw.User != "olivere" {
+		t.Errorf("expected user %q; got: %q", "olivere", tw.User)
+	if tw.Message != "" {
+		t.Errorf("expected message %q; got: %q", "", tw.Message)
+func TestGetFailsWithMissingParams(t *testing.T) {
+	// Mitigate against http://stackoverflow.com/questions/27491738/elasticsearch-go-index-failures-no-feature-for-name
+	if _, err := client.Get().Do(); err == nil {
+		t.Fatal("expected Get to fail")
+	if _, err := client.Get().Index(testIndexName).Do(); err == nil {
+	if _, err := client.Get().Type("tweet").Do(); err == nil {
+	if _, err := client.Get().Id("1").Do(); err == nil {
+	if _, err := client.Get().Index(testIndexName).Type("tweet").Do(); err == nil {
+	if _, err := client.Get().Index(testIndexName).Id("1").Do(); err == nil {
+	if _, err := client.Get().Type("tweet").Id("1").Do(); err == nil {
@@ -0,0 +1,496 @@
+// Highlight allows highlighting search results on one or more fields.
+// For details, see:
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-highlighting.html
+type Highlight struct {
+ fields []*HighlighterField
+ tagsSchema *string
+ highlightFilter *bool
+ fragmentSize *int
+ numOfFragments *int
+ preTags []string
+ postTags []string
+ order *string
+ encoder *string
+ requireFieldMatch *bool
+ boundaryMaxScan *int
+ boundaryChars []rune
+ highlighterType *string
+ fragmenter *string
+ highlightQuery Query
+ noMatchSize *int
+ phraseLimit *int
+ options map[string]interface{}
+ forceSource *bool
+ useExplicitFieldOrder bool
+func NewHighlight() *Highlight {
+ hl := &Highlight{
+ fields: make([]*HighlighterField, 0),
+ preTags: make([]string, 0),
+ postTags: make([]string, 0),
+ boundaryChars: make([]rune, 0),
+ options: make(map[string]interface{}),
+ return hl
+func (hl *Highlight) Fields(fields ...*HighlighterField) *Highlight {
+ hl.fields = append(hl.fields, fields...)
+func (hl *Highlight) Field(name string) *Highlight {
+ field := NewHighlighterField(name)
+ hl.fields = append(hl.fields, field)
+func (hl *Highlight) TagsSchema(schemaName string) *Highlight {
+ hl.tagsSchema = &schemaName
+func (hl *Highlight) HighlightFilter(highlightFilter bool) *Highlight {
+ hl.highlightFilter = &highlightFilter
+func (hl *Highlight) FragmentSize(fragmentSize int) *Highlight {
+ hl.fragmentSize = &fragmentSize
+func (hl *Highlight) NumOfFragments(numOfFragments int) *Highlight {
+ hl.numOfFragments = &numOfFragments
+func (hl *Highlight) Encoder(encoder string) *Highlight {
+ hl.encoder = &encoder
+func (hl *Highlight) PreTags(preTags ...string) *Highlight {
+ hl.preTags = make([]string, 0)
+ hl.preTags = append(hl.preTags, preTags...)
+func (hl *Highlight) PostTags(postTags ...string) *Highlight {
+ hl.postTags = make([]string, 0)
+ hl.postTags = append(hl.postTags, postTags...)
+func (hl *Highlight) Order(order string) *Highlight {
+ hl.order = &order
+func (hl *Highlight) RequireFieldMatch(requireFieldMatch bool) *Highlight {
+ hl.requireFieldMatch = &requireFieldMatch
+func (hl *Highlight) BoundaryMaxScan(boundaryMaxScan int) *Highlight {
+ hl.boundaryMaxScan = &boundaryMaxScan
+func (hl *Highlight) BoundaryChars(boundaryChars ...rune) *Highlight {
+ hl.boundaryChars = make([]rune, 0)
+ hl.boundaryChars = append(hl.boundaryChars, boundaryChars...)
+func (hl *Highlight) HighlighterType(highlighterType string) *Highlight {
+ hl.highlighterType = &highlighterType
+func (hl *Highlight) Fragmenter(fragmenter string) *Highlight {
+ hl.fragmenter = &fragmenter
+func (hl *Highlight) HighlighQuery(highlightQuery Query) *Highlight {
+ hl.highlightQuery = highlightQuery
+func (hl *Highlight) NoMatchSize(noMatchSize int) *Highlight {
+ hl.noMatchSize = &noMatchSize
+func (hl *Highlight) Options(options map[string]interface{}) *Highlight {
+ hl.options = options
+func (hl *Highlight) ForceSource(forceSource bool) *Highlight {
+ hl.forceSource = &forceSource
+func (hl *Highlight) UseExplicitFieldOrder(useExplicitFieldOrder bool) *Highlight {
+ hl.useExplicitFieldOrder = useExplicitFieldOrder
+// Creates the query source for the bool query.
+func (hl *Highlight) Source() interface{} {
+ // Returns the map inside of "highlight":
+ // "highlight":{
+ // ... this ...
+ if hl.tagsSchema != nil {
+ source["tags_schema"] = *hl.tagsSchema
+ if hl.preTags != nil && len(hl.preTags) > 0 {
+ source["pre_tags"] = hl.preTags
+ if hl.postTags != nil && len(hl.postTags) > 0 {
+ source["post_tags"] = hl.postTags
+ if hl.order != nil {
+ source["order"] = *hl.order
+ if hl.highlightFilter != nil {
+ source["highlight_filter"] = *hl.highlightFilter
+ if hl.fragmentSize != nil {
+ source["fragment_size"] = *hl.fragmentSize
+ if hl.numOfFragments != nil {
+ source["number_of_fragments"] = *hl.numOfFragments
+ if hl.encoder != nil {
+ source["encoder"] = *hl.encoder
+ if hl.requireFieldMatch != nil {
+ source["require_field_match"] = *hl.requireFieldMatch
+ if hl.boundaryMaxScan != nil {
+ source["boundary_max_scan"] = *hl.boundaryMaxScan
+ if hl.boundaryChars != nil && len(hl.boundaryChars) > 0 {
+ source["boundary_chars"] = hl.boundaryChars
+ if hl.highlighterType != nil {
+ source["type"] = *hl.highlighterType
+ if hl.fragmenter != nil {
+ source["fragmenter"] = *hl.fragmenter
+ if hl.highlightQuery != nil {
+ source["highlight_query"] = hl.highlightQuery.Source()
+ if hl.noMatchSize != nil {
+ source["no_match_size"] = *hl.noMatchSize
+ if hl.phraseLimit != nil {
+ source["phrase_limit"] = *hl.phraseLimit
+ if hl.options != nil && len(hl.options) > 0 {
+ source["options"] = hl.options
+ if hl.forceSource != nil {
+ source["force_source"] = *hl.forceSource
+ if hl.fields != nil && len(hl.fields) > 0 {
+ if hl.useExplicitFieldOrder {
+ // Use a slice for the fields
+ fields := make([]map[string]interface{}, 0)
+ for _, field := range hl.fields {
+ fmap := make(map[string]interface{})
+ fmap[field.Name] = field.Source()
+ fields = append(fields, fmap)
+ source["fields"] = fields
+ // Use a map for the fields
+ fields := make(map[string]interface{}, 0)
+ fields[field.Name] = field.Source()
+ return source
+ highlightS := make(map[string]interface{})
+ if hl.tagsSchema != "" {
+ highlightS["tags_schema"] = hl.tagsSchema
+ if len(hl.preTags) > 0 {
+ highlightS["pre_tags"] = hl.preTags
+ if len(hl.postTags) > 0 {
+ highlightS["post_tags"] = hl.postTags
+ if hl.order != "" {
+ highlightS["order"] = hl.order
+ if hl.encoder != "" {
+ highlightS["encoder"] = hl.encoder
+ highlightS["require_field_match"] = *hl.requireFieldMatch
+ if hl.highlighterType != "" {
+ highlightS["type"] = hl.highlighterType
+ if hl.fragmenter != "" {
+ highlightS["fragmenter"] = hl.fragmenter
+ highlightS["highlight_query"] = hl.highlightQuery.Source()
+ highlightS["no_match_size"] = *hl.noMatchSize
+ if len(hl.options) > 0 {
+ highlightS["options"] = hl.options
+ highlightS["force_source"] = *hl.forceSource
+ if len(hl.fields) > 0 {
+ fieldsS := make(map[string]interface{})
+ fieldsS[field.Name] = field.Source()
+ highlightS["fields"] = fieldsS
+ return highlightS
+// HighlighterField specifies a highlighted field.
+type HighlighterField struct {
+ Name string
+ fragmentSize int
+ fragmentOffset int
+ numOfFragments int
+ boundaryMaxScan int
+ matchedFields []string
+ order string
+ highlighterType string
+ fragmenter string
+func NewHighlighterField(name string) *HighlighterField {
+ return &HighlighterField{
+ Name: name,
+ fragmentSize: -1,
+ fragmentOffset: -1,
+ numOfFragments: -1,
+ boundaryMaxScan: -1,
+ matchedFields: make([]string, 0),
+func (f *HighlighterField) PreTags(preTags ...string) *HighlighterField {
+ f.preTags = make([]string, 0)
+ f.preTags = append(f.preTags, preTags...)
+ return f
+func (f *HighlighterField) PostTags(postTags ...string) *HighlighterField {
+ f.postTags = make([]string, 0)
+ f.postTags = append(f.postTags, postTags...)
+func (f *HighlighterField) FragmentSize(fragmentSize int) *HighlighterField {
+ f.fragmentSize = fragmentSize
+func (f *HighlighterField) FragmentOffset(fragmentOffset int) *HighlighterField {
+ f.fragmentOffset = fragmentOffset
+func (f *HighlighterField) NumOfFragments(numOfFragments int) *HighlighterField {
+ f.numOfFragments = numOfFragments
+func (f *HighlighterField) HighlightFilter(highlightFilter bool) *HighlighterField {
+ f.highlightFilter = &highlightFilter
+func (f *HighlighterField) Order(order string) *HighlighterField {
+ f.order = &order
+func (f *HighlighterField) RequireFieldMatch(requireFieldMatch bool) *HighlighterField {
+ f.requireFieldMatch = &requireFieldMatch
+func (f *HighlighterField) BoundaryMaxScan(boundaryMaxScan int) *HighlighterField {
+ f.boundaryMaxScan = boundaryMaxScan
+func (f *HighlighterField) BoundaryChars(boundaryChars ...rune) *HighlighterField {
+ f.boundaryChars = make([]rune, 0)
+ f.boundaryChars = append(f.boundaryChars, boundaryChars...)
+func (f *HighlighterField) HighlighterType(highlighterType string) *HighlighterField {
+ f.highlighterType = &highlighterType
+func (f *HighlighterField) Fragmenter(fragmenter string) *HighlighterField {
+ f.fragmenter = &fragmenter
+func (f *HighlighterField) HighlightQuery(highlightQuery Query) *HighlighterField {
+ f.highlightQuery = highlightQuery
+func (f *HighlighterField) NoMatchSize(noMatchSize int) *HighlighterField {
+ f.noMatchSize = &noMatchSize
+func (f *HighlighterField) Options(options map[string]interface{}) *HighlighterField {
+ f.options = options
+func (f *HighlighterField) MatchedFields(matchedFields ...string) *HighlighterField {
+ f.matchedFields = make([]string, 0)
+ f.matchedFields = append(f.matchedFields, matchedFields...)
+func (f *HighlighterField) PhraseLimit(phraseLimit int) *HighlighterField {
+ f.phraseLimit = &phraseLimit
+func (f *HighlighterField) ForceSource(forceSource bool) *HighlighterField {
+ f.forceSource = &forceSource
+func (f *HighlighterField) Source() interface{} {
+ if f.preTags != nil && len(f.preTags) > 0 {
+ source["pre_tags"] = f.preTags
+ if f.postTags != nil && len(f.postTags) > 0 {
+ source["post_tags"] = f.postTags
+ if f.fragmentSize != -1 {
+ source["fragment_size"] = f.fragmentSize
+ if f.numOfFragments != -1 {
+ source["number_of_fragments"] = f.numOfFragments
+ if f.fragmentOffset != -1 {
+ source["fragment_offset"] = f.fragmentOffset
+ if f.highlightFilter != nil {
+ source["highlight_filter"] = *f.highlightFilter
+ if f.order != nil {
+ source["order"] = *f.order
+ if f.requireFieldMatch != nil {
+ source["require_field_match"] = *f.requireFieldMatch
+ if f.boundaryMaxScan != -1 {
+ source["boundary_max_scan"] = f.boundaryMaxScan
+ if f.boundaryChars != nil && len(f.boundaryChars) > 0 {
+ source["boundary_chars"] = f.boundaryChars
+ if f.highlighterType != nil {
+ source["type"] = *f.highlighterType
+ if f.fragmenter != nil {
+ source["fragmenter"] = *f.fragmenter
+ if f.highlightQuery != nil {
+ source["highlight_query"] = f.highlightQuery.Source()
+ if f.noMatchSize != nil {
+ source["no_match_size"] = *f.noMatchSize
+ if f.matchedFields != nil && len(f.matchedFields) > 0 {
+ source["matched_fields"] = f.matchedFields
+ if f.phraseLimit != nil {
+ source["phrase_limit"] = *f.phraseLimit
+ if f.options != nil && len(f.options) > 0 {
+ source["options"] = f.options
+ if f.forceSource != nil {
+ source["force_source"] = *f.forceSource
@@ -0,0 +1,168 @@
+func TestHighlighterField(t *testing.T) {
+ field := NewHighlighterField("grade")
+ data, err := json.Marshal(field.Source())
+ expected := `{}`
+func TestHighlighterFieldWithOptions(t *testing.T) {
+ field := NewHighlighterField("grade").FragmentSize(2).NumOfFragments(1)
+ expected := `{"fragment_size":2,"number_of_fragments":1}`
+func TestHighlightWithStringField(t *testing.T) {
+ builder := NewHighlight().Field("grade")
+ expected := `{"fields":{"grade":{}}}`
+func TestHighlightWithFields(t *testing.T) {
+ gradeField := NewHighlighterField("grade")
+ builder := NewHighlight().Fields(gradeField)
+func TestHighlightWithMultipleFields(t *testing.T) {
+ colorField := NewHighlighterField("color")
+ builder := NewHighlight().Fields(gradeField, colorField)
+ expected := `{"fields":{"color":{},"grade":{}}}`
+func TestHighlighterWithExplicitFieldOrder(t *testing.T) {
+ gradeField := NewHighlighterField("grade").FragmentSize(2)
+ colorField := NewHighlighterField("color").FragmentSize(2).NumOfFragments(1)
+ builder := NewHighlight().Fields(gradeField, colorField).UseExplicitFieldOrder(true)
+ expected := `{"fields":[{"grade":{"fragment_size":2}},{"color":{"fragment_size":2,"number_of_fragments":1}}]}`
+func TestHighlightWithTermQuery(t *testing.T) {
+ tweet3 := tweet{User: "sandrae", Message: "Cycling is fun to do."}
+ // Specify highlighter
+ hl := NewHighlight()
+ hl = hl.Fields(NewHighlighterField("message"))
+ hl = hl.PreTags("<em>").PostTags("</em>")
+ query := NewPrefixQuery("message", "golang")
+ Highlight(hl).
+ Query(&query).
+ if searchResult.Hits == nil {
+ t.Fatalf("expected SearchResult.Hits != nil; got nil")
+ if searchResult.Hits.TotalHits != 1 {
+ t.Fatalf("expected SearchResult.Hits.TotalHits = %d; got %d", 1, searchResult.Hits.TotalHits)
+ if len(searchResult.Hits.Hits) != 1 {
+ t.Fatalf("expected len(SearchResult.Hits.Hits) = %d; got %d", 1, len(searchResult.Hits.Hits))
+ hit := searchResult.Hits.Hits[0]
+ if err := json.Unmarshal(*hit.Source, &tw); err != nil {
+ if hit.Highlight == nil || len(hit.Highlight) == 0 {
+ t.Fatal("expected hit to have a highlight; got nil")
+ if hl, found := hit.Highlight["message"]; found {
+ if len(hl) != 1 {
+ t.Fatalf("expected to have one highlight for field \"message\"; got %d", len(hl))
+ expected := "Welcome to <em>Golang</em> and Elasticsearch."
+ if hl[0] != expected {
+ t.Errorf("expected to have highlight \"%s\"; got \"%s\"", expected, hl[0])
+ t.Fatal("expected to have a highlight on field \"message\"; got none")
@@ -0,0 +1,216 @@
+// IndexResult is the result of indexing a document in Elasticsearch.
+type IndexResult struct {
+ Created bool `json:"created"`
+// IndexService adds documents to Elasticsearch.
+type IndexService struct {
+ ttl string
+func NewIndexService(client *Client) *IndexService {
+ builder := &IndexService{
+func (b *IndexService) Index(name string) *IndexService {
+ b.index = name
+func (b *IndexService) Type(_type string) *IndexService {
+ b._type = _type
+func (b *IndexService) Id(id string) *IndexService {
+func (b *IndexService) Routing(routing string) *IndexService {
+func (b *IndexService) Parent(parent string) *IndexService {
+ b.parent = parent
+// OpType is either "create" or "index" (the default).
+func (b *IndexService) OpType(opType string) *IndexService {
+ b.opType = opType
+func (b *IndexService) Refresh(refresh bool) *IndexService {
+func (b *IndexService) Version(version int64) *IndexService {
+// VersionType is either "internal" (default), "external",
+// "external_gt", "external_gte", or "force".
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types
+func (b *IndexService) VersionType(versionType string) *IndexService {
+func (b *IndexService) Timestamp(timestamp string) *IndexService {
+ b.timestamp = timestamp
+func (b *IndexService) TTL(ttl string) *IndexService {
+ b.ttl = ttl
+func (b *IndexService) Timeout(timeout string) *IndexService {
+ b.timeout = timeout
+func (b *IndexService) BodyString(body string) *IndexService {
+ b.bodyString = body
+func (b *IndexService) BodyJson(json interface{}) *IndexService {
+ b.bodyJson = json
+func (b *IndexService) Pretty(pretty bool) *IndexService {
+func (b *IndexService) Do() (*IndexResult, error) {
+ var path, method string
+ if b.id != "" {
+ // Create document with manual id
+ method = "PUT"
+ path = "/{index}/{type}/{id}"
+ // Automatic ID generation
+ // See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/docs-index_.html#index-creation
+ method = "POST"
+ path = "/{index}/{type}/"
+ path, err := uritemplates.Expand(path, map[string]string{
+ "type": b._type,
+ params.Set("pretty", "true")
+ params.Set("routing", b.routing)
+ if b.parent != "" {
+ params.Set("parent", b.parent)
+ if b.opType != "" {
+ params.Set("op_type", b.opType)
+ if b.refresh != nil && *b.refresh {
+ params.Set("refresh", "true")
+ params.Set("version", fmt.Sprintf("%d", *b.version))
+ params.Set("version_type", b.versionType)
+ if b.timestamp != "" {
+ params.Set("timestamp", b.timestamp)
+ if b.ttl != "" {
+ params.Set("ttl", b.ttl)
+ if b.timeout != "" {
+ params.Set("timeout", b.timeout)
+ // Body
+ if b.bodyJson != nil {
+ body = b.bodyJson
+ body = b.bodyString
+ res, err := b.client.PerformRequest(method, path, params, body)
+ ret := new(IndexResult)
@@ -0,0 +1,145 @@
+// CloseIndexService closes an index.
+// See documentation at http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/indices-open-close.html.
+type CloseIndexService struct {
+// NewCloseIndexService creates a new CloseIndexService.
+func NewCloseIndexService(client *Client) *CloseIndexService {
+ return &CloseIndexService{client: client}
+func (s *CloseIndexService) Index(index string) *CloseIndexService {
+// Timeout is an explicit operation timeout.
+func (s *CloseIndexService) Timeout(timeout string) *CloseIndexService {
+func (s *CloseIndexService) MasterTimeout(masterTimeout string) *CloseIndexService {
+func (s *CloseIndexService) IgnoreUnavailable(ignoreUnavailable bool) *CloseIndexService {
+// expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified).
+func (s *CloseIndexService) AllowNoIndices(allowNoIndices bool) *CloseIndexService {
+// concrete indices that are open, closed or both.
+func (s *CloseIndexService) ExpandWildcards(expandWildcards string) *CloseIndexService {
+func (s *CloseIndexService) buildURL() (string, url.Values, error) {
+ path, err := uritemplates.Expand("/{index}/_close", map[string]string{
+func (s *CloseIndexService) Validate() error {
+func (s *CloseIndexService) Do() (*CloseIndexResponse, error) {
+ ret := new(CloseIndexResponse)
+// CloseIndexResponse is the response of CloseIndexService.Do.
+type CloseIndexResponse struct {
+type IndexExistsService struct {
+func NewIndexExistsService(client *Client) *IndexExistsService {
+ builder := &IndexExistsService{
+func (b *IndexExistsService) Index(index string) *IndexExistsService {
+func (b *IndexExistsService) Do() (bool, error) {
+ path, err := uritemplates.Expand("/{index}", map[string]string{
+ res, err := b.client.PerformRequest("HEAD", path, nil, nil)
@@ -0,0 +1,186 @@
+// IndicesGetService retrieves information about one or more indices.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-get-index.html.
+type IndicesGetService struct {
+ feature []string
+// NewIndicesGetService creates a new IndicesGetService.
+func NewIndicesGetService(client *Client) *IndicesGetService {
+ return &IndicesGetService{
+ feature: make([]string, 0),
+// Index is a list of index names. Use _all to retrieve information about
+// all indices of a cluster.
+func (s *IndicesGetService) Index(index ...string) *IndicesGetService {
+// Feature is a list of features (e.g. _settings,_mappings,_warmers, and _aliases).
+func (s *IndicesGetService) Feature(feature ...string) *IndicesGetService {
+ s.feature = append(s.feature, feature...)
+// ExpandWildcards indicates whether wildcard expressions should
+// get expanded to open or closed indices (default: open).
+func (s *IndicesGetService) ExpandWildcards(expandWildcards string) *IndicesGetService {
+// Local indicates whether to return local information (do not retrieve
+// the state from master node (default: false)).
+func (s *IndicesGetService) Local(local bool) *IndicesGetService {
+// IgnoreUnavailable indicates whether to ignore unavailable indexes (default: false).
+func (s *IndicesGetService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesGetService {
+// AllowNoIndices indicates whether to ignore if a wildcard expression
+// resolves to no concrete indices (default: false).
+func (s *IndicesGetService) AllowNoIndices(allowNoIndices bool) *IndicesGetService {
+func (s *IndicesGetService) Pretty(pretty bool) *IndicesGetService {
+func (s *IndicesGetService) buildURL() (string, url.Values, error) {
+ var path string
+ var index []string
+ if len(s.feature) > 0 {
+ path, err = uritemplates.Expand("/{index}/{feature}", map[string]string{
+ "feature": strings.Join(s.feature, ","),
+ path, err = uritemplates.Expand("/{index}", map[string]string{
+func (s *IndicesGetService) Validate() error {
+func (s *IndicesGetService) Do() (map[string]*IndicesGetResponse, error) {
+ var ret map[string]*IndicesGetResponse
+// IndicesGetResponse is part of the response of IndicesGetService.Do.
+type IndicesGetResponse struct {
+ Aliases map[string]interface{} `json:"aliases"`
+ Mappings map[string]interface{} `json:"mappings"`
+ Settings map[string]interface{} `json:"settings"`
+ Warmers map[string]interface{} `json:"warmers"`
@@ -0,0 +1,189 @@
+// IndicesGetSettingsService allows to retrieve settings of one
+// or more indices.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/indices-get-settings.html.
+type IndicesGetSettingsService struct {
+ name []string
+// NewIndicesGetSettingsService creates a new IndicesGetSettingsService.
+func NewIndicesGetSettingsService(client *Client) *IndicesGetSettingsService {
+ return &IndicesGetSettingsService{
+ name: make([]string, 0),
+// Index is a list of index names; use `_all` or empty string to perform the operation on all indices.
+func (s *IndicesGetSettingsService) Index(index ...string) *IndicesGetSettingsService {
+// Name are the names of the settings that should be included.
+func (s *IndicesGetSettingsService) Name(name ...string) *IndicesGetSettingsService {
+ s.name = append(s.name, name...)
+// IgnoreUnavailable indicates whether specified concrete indices should
+// be ignored when unavailable (missing or closed).
+func (s *IndicesGetSettingsService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesGetSettingsService {
+// (This includes `_all` string or when no indices have been specified).
+func (s *IndicesGetSettingsService) AllowNoIndices(allowNoIndices bool) *IndicesGetSettingsService {
+// ExpandWildcards indicates whether to expand wildcard expression
+// to concrete indices that are open, closed or both.
+// Options: open, closed, none, all. Default: open,closed.
+func (s *IndicesGetSettingsService) ExpandWildcards(expandWildcards string) *IndicesGetSettingsService {
+func (s *IndicesGetSettingsService) FlatSettings(flatSettings bool) *IndicesGetSettingsService {
+func (s *IndicesGetSettingsService) Local(local bool) *IndicesGetSettingsService {
+func (s *IndicesGetSettingsService) Pretty(pretty bool) *IndicesGetSettingsService {
+func (s *IndicesGetSettingsService) buildURL() (string, url.Values, error) {
+ if len(s.name) > 0 {
+ path, err = uritemplates.Expand("/{index}/_settings/{name}", map[string]string{
+ "name": strings.Join(s.name, ","),
+ path, err = uritemplates.Expand("/{index}/_settings", map[string]string{
+func (s *IndicesGetSettingsService) Validate() error {
+func (s *IndicesGetSettingsService) Do() (map[string]*IndicesGetSettingsResponse, error) {
+ var ret map[string]*IndicesGetSettingsResponse
+// IndicesGetSettingsResponse is the response of IndicesGetSettingsService.Do.
+type IndicesGetSettingsResponse struct {
@@ -0,0 +1,81 @@
+func TestIndexGetSettingsURL(t *testing.T) {
+ Names []string
+ "/_all/_settings",
+ []string{"index.merge.*"},
+ "/_all/_settings/index.merge.%2A",
+ []string{"twitter-*"},
+ []string{"index.merge.*", "_settings"},
+ "/twitter-%2A/_settings/index.merge.%2A%2C_settings",
+ "/store-1%2Cstore-2/_settings/index.merge.%2A%2C_settings",
+ path, _, err := client.IndexGetSettings().Index(test.Indices...).Name(test.Names...).buildURL()
+func TestIndexGetSettingsService(t *testing.T) {
+ esversion, err := client.ElasticsearchVersion(DefaultURL)
+ if esversion < "1.4.0" {
+ t.Skip("Index Get API is available since 1.4")
+ res, err := client.IndexGetSettings().Index(testIndexName).Do()
+ t.Fatalf("expected result; got: %v", res)
+ info, found := res[testIndexName]
+ t.Fatalf("expected index %q to be found; got: %v", testIndexName, found)
+ if info == nil {
+ t.Fatalf("expected index %q to be != nil; got: %v", testIndexName, info)
+ if info.Settings == nil {
+ t.Fatalf("expected index settings of %q to be != nil; got: %v", testIndexName, info.Settings)
@@ -0,0 +1,84 @@
+func TestIndexGetURL(t *testing.T) {
+ Features []string
+ "/_all",
+ []string{"_mappings"},
+ "/_all/_mappings",
+ []string{"_mappings", "_settings"},
+ "/twitter/_mappings%2C_settings",
+ "/store-1%2Cstore-2/_mappings%2C_settings",
+ path, _, err := client.IndexGet().Index(test.Indices...).Feature(test.Features...).buildURL()
+func TestIndexGetService(t *testing.T) {
+ res, err := client.IndexGet().Index(testIndexName).Do()
+ if info.Mappings == nil {
+ t.Errorf("expected mappings to be != nil; got: %v", info.Mappings)
+ t.Errorf("expected settings to be != nil; got: %v", info.Settings)
+// OpenIndexService opens an index.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/indices-open-close.html.
+type OpenIndexService struct {
+// NewOpenIndexService creates a new OpenIndexService.
+func NewOpenIndexService(client *Client) *OpenIndexService {
+ return &OpenIndexService{client: client}
+// Index is the name of the index to open.
+func (s *OpenIndexService) Index(index string) *OpenIndexService {
+func (s *OpenIndexService) Timeout(timeout string) *OpenIndexService {
+func (s *OpenIndexService) MasterTimeout(masterTimeout string) *OpenIndexService {
+func (s *OpenIndexService) IgnoreUnavailable(ignoreUnavailable bool) *OpenIndexService {
+func (s *OpenIndexService) AllowNoIndices(allowNoIndices bool) *OpenIndexService {
+func (s *OpenIndexService) ExpandWildcards(expandWildcards string) *OpenIndexService {
+func (s *OpenIndexService) buildURL() (string, url.Values, error) {
+ path, err := uritemplates.Expand("/{index}/_open", map[string]string{
+func (s *OpenIndexService) Validate() error {
+func (s *OpenIndexService) Do() (*OpenIndexResponse, error) {
+ ret := new(OpenIndexResponse)
+// OpenIndexResponse is the response of OpenIndexService.Do.
+type OpenIndexResponse struct {
@@ -0,0 +1,517 @@
+ testIndexName = "elastic-test"
+ testIndexName2 = "elastic-test2"
+ testMapping = `
+ "settings":{
+ "number_of_shards":1,
+ "number_of_replicas":0
+ "mappings":{
+ "tweet":{
+ "properties":{
+ "tags":{
+ "type":"string"
+ "location":{
+ "type":"geo_point"
+ "suggest_field":{
+ "type":"completion",
+ "payloads":true
+type tweet struct {
+ Suggest *SuggestField `json:"suggest_field,omitempty"`
+func (t tweet) String() string {
+ return fmt.Sprintf("tweet{User:%q,Message:%q,Retweets:%d}", t.User, t.Message, t.Retweets)
+func isTravis() bool {
+ return os.Getenv("TRAVIS") != ""
+func travisGoVersion() string {
+ return os.Getenv("TRAVIS_GO_VERSION")
+type logger interface {
+ Error(args ...interface{})
+ Errorf(format string, args ...interface{})
+ Fatal(args ...interface{})
+ Fatalf(format string, args ...interface{})
+ Fail()
+ FailNow()
+ Log(args ...interface{})
+ Logf(format string, args ...interface{})
+func setupTestClient(t logger, options ...ClientOptionFunc) (client *Client) {
+ client, err = NewClient(options...)
+ client.DeleteIndex(testIndexName).Do()
+ client.DeleteIndex(testIndexName2).Do()
+ return client
+func setupTestClientAndCreateIndex(t logger, options ...ClientOptionFunc) *Client {
+ client := setupTestClient(t, options...)
+ // Create index
+ createIndex, err := client.CreateIndex(testIndexName).Body(testMapping).Do()
+ if createIndex == nil {
+ t.Errorf("expected result to be != nil; got: %v", createIndex)
+ // Create second index
+ createIndex2, err := client.CreateIndex(testIndexName2).Body(testMapping).Do()
+ if createIndex2 == nil {
+ t.Errorf("expected result to be != nil; got: %v", createIndex2)
+func setupTestClientAndCreateIndexAndAddDocs(t logger, options ...ClientOptionFunc) *Client {
+ client := setupTestClientAndCreateIndex(t, options...)
+func TestIndexLifecycle(t *testing.T) {
+ client := setupTestClient(t)
+ createIndex, err := client.CreateIndex(testIndexName).Do()
+ t.Errorf("expected CreateIndexResult.Acknowledged %v; got %v", true, createIndex.Acknowledged)
+ // Check if index exists
+ indexExists, err := client.IndexExists(testIndexName).Do()
+ if !indexExists {
+ t.Fatalf("index %s should exist, but doesn't\n", testIndexName)
+ // Delete index
+ deleteIndex, err := client.DeleteIndex(testIndexName).Do()
+ t.Errorf("expected DeleteIndexResult.Acknowledged %v; got %v", true, deleteIndex.Acknowledged)
+ indexExists, err = client.IndexExists(testIndexName).Do()
+ if indexExists {
+ t.Fatalf("index %s should not exist, but does\n", testIndexName)
+func TestIndexExistScenarios(t *testing.T) {
+ // Should return false if index does not exist
+ t.Fatalf("expected index exists to return %v, got %v", false, indexExists)
+ t.Errorf("expected CreateIndexResult.Ack %v; got %v", true, createIndex.Acknowledged)
+ // Should return true if index does not exist
+ t.Fatalf("expected index exists to return %v, got %v", true, indexExists)
+// TODO(oe): Find out why this test fails on Travis CI.
+func TestIndexOpenAndClose(t *testing.T) {
+ defer func() {
+ waitForYellow := func() {
+ // Wait for status yellow
+ res, err := client.ClusterHealth().WaitForStatus("yellow").Timeout("15s").Do()
+ if res != nil && res.TimedOut {
+ t.Fatalf("cluster time out waiting for status %q", "yellow")
+ // Wait for cluster
+ waitForYellow()
+ // Close index
+ cresp, err := client.CloseIndex(testIndexName).Do()
+ if !cresp.Acknowledged {
+ t.Fatalf("expected close index of %q to be acknowledged\n", testIndexName)
+ // Open index again
+ oresp, err := client.OpenIndex(testIndexName).Do()
+ if !oresp.Acknowledged {
+ t.Fatalf("expected open index of %q to be acknowledged\n", testIndexName)
+func TestDocumentLifecycle(t *testing.T) {
+ // Exists
+ // Get document
+ getResult, err := client.Get().
+ if getResult.Index != testIndexName {
+ t.Errorf("expected GetResult.Index %q; got %q", testIndexName, getResult.Index)
+ if getResult.Type != "tweet" {
+ t.Errorf("expected GetResult.Type %q; got %q", "tweet", getResult.Type)
+ if getResult.Id != "1" {
+ t.Errorf("expected GetResult.Id %q; got %q", "1", getResult.Id)
+ if getResult.Source == nil {
+ t.Errorf("expected GetResult.Source to be != nil; got nil")
+ // Decode the Source field
+ var tweetGot tweet
+ err = json.Unmarshal(*getResult.Source, &tweetGot)
+ if tweetGot.User != tweet1.User {
+ t.Errorf("expected Tweet.User to be %q; got %q", tweet1.User, tweetGot.User)
+ if tweetGot.Message != tweet1.Message {
+ t.Errorf("expected Tweet.Message to be %q; got %q", tweet1.Message, tweetGot.Message)
+ // Delete document again
+ deleteResult, err := client.Delete().Index(testIndexName).Type("tweet").Id("1").Do()
+ if deleteResult == nil {
+ t.Errorf("expected result to be != nil; got: %v", deleteResult)
+ exists, err = client.Exists().Index(testIndexName).Type("tweet").Id("1").Do()
+func TestDocumentLifecycleWithAutomaticIDGeneration(t *testing.T) {
+ if indexResult.Id == "" {
+ t.Fatalf("expected Es to generate an automatic ID, got: %v", indexResult.Id)
+ id := indexResult.Id
+ exists, err := client.Exists().Index(testIndexName).Type("tweet").Id(id).Do()
+ Id(id).
+ if getResult.Id != id {
+ t.Errorf("expected GetResult.Id %q; got %q", id, getResult.Id)
+ deleteResult, err := client.Delete().Index(testIndexName).Type("tweet").Id(id).Do()
+ exists, err = client.Exists().Index(testIndexName).Type("tweet").Id(id).Do()
+func TestIndexCreateExistsOpenCloseDelete(t *testing.T) {
+ // TODO: Find out how to make these test robust
+ t.Skip("test fails regularly with 409 (Conflict): " +
+ "IndexPrimaryShardNotAllocatedException[[elastic-test] " +
+ "primary not allocated post api... skipping")
+ t.Fatalf("expected response; got: %v", createIndex)
+ t.Errorf("expected ack for creating index; got: %v", createIndex.Acknowledged)
+ t.Fatalf("expected index exists=%v; got %v", true, indexExists)
+ closeIndex, err := client.CloseIndex(testIndexName).Do()
+ if closeIndex == nil {
+ t.Fatalf("expected response; got: %v", closeIndex)
+ if !closeIndex.Acknowledged {
+ t.Errorf("expected ack for closing index; got: %v", closeIndex.Acknowledged)
+ // Open index
+ openIndex, err := client.OpenIndex(testIndexName).Do()
+ if openIndex == nil {
+ t.Fatalf("expected response; got: %v", openIndex)
+ if !openIndex.Acknowledged {
+ t.Errorf("expected ack for opening index; got: %v", openIndex.Acknowledged)
+ if deleteIndex == nil {
+ t.Fatalf("expected response; got: %v", deleteIndex)
+ t.Errorf("expected ack for deleting index; got %v", deleteIndex.Acknowledged)
@@ -0,0 +1,122 @@
+// IndicesDeleteTemplateService deletes index templates.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/indices-templates.html.
+type IndicesDeleteTemplateService struct {
+ name string
+// NewIndicesDeleteTemplateService creates a new IndicesDeleteTemplateService.
+func NewIndicesDeleteTemplateService(client *Client) *IndicesDeleteTemplateService {
+ return &IndicesDeleteTemplateService{
+// Name is the name of the template.
+func (s *IndicesDeleteTemplateService) Name(name string) *IndicesDeleteTemplateService {
+ s.name = name
+func (s *IndicesDeleteTemplateService) Timeout(timeout string) *IndicesDeleteTemplateService {
+func (s *IndicesDeleteTemplateService) MasterTimeout(masterTimeout string) *IndicesDeleteTemplateService {
+func (s *IndicesDeleteTemplateService) Pretty(pretty bool) *IndicesDeleteTemplateService {
+func (s *IndicesDeleteTemplateService) buildURL() (string, url.Values, error) {
+ path, err := uritemplates.Expand("/_template/{name}", map[string]string{
+ "name": s.name,
+func (s *IndicesDeleteTemplateService) Validate() error {
+ if s.name == "" {
+ invalid = append(invalid, "Name")
+func (s *IndicesDeleteTemplateService) Do() (*IndicesDeleteTemplateResponse, error) {
+ ret := new(IndicesDeleteTemplateResponse)
+// IndicesDeleteTemplateResponse is the response of IndicesDeleteTemplateService.Do.
+type IndicesDeleteTemplateResponse struct {
+ Acknowledged bool `json:"acknowledged,omitempty"`
+// IndicesExistsTemplateService checks if a given template exists.
+// See http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html#indices-templates-exists
+// for documentation.
+type IndicesExistsTemplateService struct {
+// NewIndicesExistsTemplateService creates a new IndicesExistsTemplateService.
+func NewIndicesExistsTemplateService(client *Client) *IndicesExistsTemplateService {
+ return &IndicesExistsTemplateService{
+func (s *IndicesExistsTemplateService) Name(name string) *IndicesExistsTemplateService {
+// Local indicates whether to return local information, i.e. do not retrieve
+func (s *IndicesExistsTemplateService) Local(local bool) *IndicesExistsTemplateService {
+func (s *IndicesExistsTemplateService) Pretty(pretty bool) *IndicesExistsTemplateService {
+func (s *IndicesExistsTemplateService) buildURL() (string, url.Values, error) {
+func (s *IndicesExistsTemplateService) Validate() error {
+func (s *IndicesExistsTemplateService) Do() (bool, error) {
+ res, err := s.client.PerformRequest("HEAD", path, params, nil)
@@ -0,0 +1,68 @@
+func TestIndexExistsTemplate(t *testing.T) {
+ "template":"elastic-test*",
+ putres, err := client.IndexPutTemplate("elastic-template").BodyString(tmpl).Do()
+ if !putres.Acknowledged {
+ t.Fatalf("expected index template to be ack'd; got: %v", putres.Acknowledged)
+ defer client.IndexDeleteTemplate("elastic-template").Do()
+ // Check if template exists
+ exists, err := client.IndexTemplateExists("elastic-template").Do()
+ t.Fatalf("expected index template %q to exist; got: %v", "elastic-template", exists)
+ getres, err := client.IndexGetTemplate("elastic-template").Do()
+ t.Fatalf("expected to get index template %q; got: %v", "elastic-template", getres)
@@ -0,0 +1,155 @@
+// IndicesExistsTypeService checks if one or more types exist in one or more indices.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-types-exists.html.
+type IndicesExistsTypeService struct {
+// NewIndicesExistsTypeService creates a new IndicesExistsTypeService.
+func NewIndicesExistsTypeService(client *Client) *IndicesExistsTypeService {
+ return &IndicesExistsTypeService{
+// Index is a list of index names; use `_all` to check the types across all indices.
+func (s *IndicesExistsTypeService) Index(index ...string) *IndicesExistsTypeService {
+// Type is a list of document types to check.
+func (s *IndicesExistsTypeService) Type(typ ...string) *IndicesExistsTypeService {
+func (s *IndicesExistsTypeService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesExistsTypeService {
+func (s *IndicesExistsTypeService) AllowNoIndices(allowNoIndices bool) *IndicesExistsTypeService {
+func (s *IndicesExistsTypeService) ExpandWildcards(expandWildcards string) *IndicesExistsTypeService {
+// Local specifies whether to return local information, i.e. do not retrieve
+func (s *IndicesExistsTypeService) Local(local bool) *IndicesExistsTypeService {
+func (s *IndicesExistsTypeService) Pretty(pretty bool) *IndicesExistsTypeService {
+func (s *IndicesExistsTypeService) buildURL() (string, url.Values, error) {
+ path, err := uritemplates.Expand("/{index}/{type}", map[string]string{
+func (s *IndicesExistsTypeService) Validate() error {
+func (s *IndicesExistsTypeService) Do() (bool, error) {
@@ -0,0 +1,121 @@
+func TestTypeExistsBuildURL(t *testing.T) {
+ ExpectValidateFailure bool
+ "",
+ true,
+ []string{"index1"},
+ []string{"type1"},
+ "/index1/type1",
+ false,
+ []string{"index1", "index2"},
+ "/index1%2Cindex2/type1",
+ []string{"type1", "type2"},
+ "/index1%2Cindex2/type1%2Ctype2",
+ err := client.TypeExists().Index(test.Indices...).Type(test.Types...).Validate()
+ if err == nil && test.ExpectValidateFailure {
+ t.Errorf("case #%d: expected validate to fail", i+1)
+ if err != nil && !test.ExpectValidateFailure {
+ t.Errorf("case #%d: expected validate to succeed", i+1)
+ if !test.ExpectValidateFailure {
+ path, _, err := client.TypeExists().Index(test.Indices...).Type(test.Types...).buildURL()
+ t.Fatalf("case #%d: %v", i+1, err)
+ t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path)
+func TestTypeExists(t *testing.T) {
+ // Create index with tweet type
+ // Check if type exists
+ exists, err := client.TypeExists().Index(testIndexName).Type("tweet").Do()
+ t.Fatalf("type %s should exist in index %s, but doesn't\n", "tweet", testIndexName)
+ exists, err = client.TypeExists().Index(testIndexName).Type("tweet").Do()
+ t.Fatalf("type %s should not exist in index %s, but it does\n", "tweet", testIndexName)
@@ -0,0 +1,128 @@
+// IndicesGetTemplateService returns an index template.
+type IndicesGetTemplateService struct {
+// NewIndicesGetTemplateService creates a new IndicesGetTemplateService.
+func NewIndicesGetTemplateService(client *Client) *IndicesGetTemplateService {
+ return &IndicesGetTemplateService{
+// Name is the name of the index template.
+func (s *IndicesGetTemplateService) Name(name ...string) *IndicesGetTemplateService {
+// FlatSettings is returns settings in flat format (default: false).
+func (s *IndicesGetTemplateService) FlatSettings(flatSettings bool) *IndicesGetTemplateService {
+func (s *IndicesGetTemplateService) Local(local bool) *IndicesGetTemplateService {
+func (s *IndicesGetTemplateService) Pretty(pretty bool) *IndicesGetTemplateService {
+func (s *IndicesGetTemplateService) buildURL() (string, url.Values, error) {
+ path, err = uritemplates.Expand("/_template/{name}", map[string]string{
+ path = "/_template"
+func (s *IndicesGetTemplateService) Validate() error {
+func (s *IndicesGetTemplateService) Do() (map[string]*IndicesGetTemplateResponse, error) {
+ var ret map[string]*IndicesGetTemplateResponse
+// IndicesGetTemplateResponse is the response of IndicesGetTemplateService.Do.
+type IndicesGetTemplateResponse struct {
+ Order int `json:"order,omitempty"`
+ Template string `json:"template,omitempty"`
+ Settings map[string]interface{} `json:"settings,omitempty"`
+ Mappings map[string]interface{} `json:"mappings,omitempty"`
+ Aliases map[string]interface{} `json:"aliases,omitempty"`
+func TestIndexGetTemplateURL(t *testing.T) {
+ "/_template",
+ "/_template/index1",
+ "/_template/index1%2Cindex2",
+ path, _, err := client.IndexGetTemplate().Name(test.Names...).buildURL()
@@ -0,0 +1,179 @@
+// IndicesPutTemplateService creates or updates index mappings.
+type IndicesPutTemplateService struct {
+ order interface{}
+ create *bool
+// NewIndicesPutTemplateService creates a new IndicesPutTemplateService.
+func NewIndicesPutTemplateService(client *Client) *IndicesPutTemplateService {
+ return &IndicesPutTemplateService{
+func (s *IndicesPutTemplateService) Name(name string) *IndicesPutTemplateService {
+func (s *IndicesPutTemplateService) Timeout(timeout string) *IndicesPutTemplateService {
+func (s *IndicesPutTemplateService) MasterTimeout(masterTimeout string) *IndicesPutTemplateService {
+func (s *IndicesPutTemplateService) FlatSettings(flatSettings bool) *IndicesPutTemplateService {
+// Order is the order for this template when merging multiple matching ones
+// (higher numbers are merged later, overriding the lower numbers).
+func (s *IndicesPutTemplateService) Order(order interface{}) *IndicesPutTemplateService {
+ s.order = order
+// Create indicates whether the index template should only be added if
+// new or can also replace an existing one.
+func (s *IndicesPutTemplateService) Create(create bool) *IndicesPutTemplateService {
+ s.create = &create
+func (s *IndicesPutTemplateService) Pretty(pretty bool) *IndicesPutTemplateService {
+// BodyJson is documented as: The template definition.
+func (s *IndicesPutTemplateService) BodyJson(body interface{}) *IndicesPutTemplateService {
+// BodyString is documented as: The template definition.
+func (s *IndicesPutTemplateService) BodyString(body string) *IndicesPutTemplateService {
+func (s *IndicesPutTemplateService) buildURL() (string, url.Values, error) {
+ if s.order != nil {
+ params.Set("order", fmt.Sprintf("%v", s.order))
+ if s.create != nil {
+ params.Set("create", fmt.Sprintf("%v", *s.create))
+func (s *IndicesPutTemplateService) Validate() error {
+ if s.bodyString == "" && s.bodyJson == nil {
+ invalid = append(invalid, "BodyJson")
+func (s *IndicesPutTemplateService) Do() (*IndicesPutTemplateResponse, error) {
+ res, err := s.client.PerformRequest("PUT", path, params, body)
+ ret := new(IndicesPutTemplateResponse)
+// IndicesPutTemplateResponse is the response of IndicesPutTemplateService.Do.
+type IndicesPutTemplateResponse struct {
@@ -0,0 +1,385 @@
+// IndicesStatsService provides stats on various metrics of one or more
+// indices. See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-stats.html.
+type IndicesStatsService struct {
+ metric []string
+ completionFields []string
+ fielddataFields []string
+ groups []string
+ human *bool
+// NewIndicesStatsService creates a new IndicesStatsService.
+func NewIndicesStatsService(client *Client) *IndicesStatsService {
+ return &IndicesStatsService{
+ metric: make([]string, 0),
+ completionFields: make([]string, 0),
+ fielddataFields: make([]string, 0),
+ groups: make([]string, 0),
+ types: make([]string, 0),
+// Metric limits the information returned the specific metrics. Options are:
+// docs, store, indexing, get, search, completion, fielddata, flush, merge,
+// query_cache, refresh, suggest, and warmer.
+func (s *IndicesStatsService) Metric(metric ...string) *IndicesStatsService {
+ s.metric = append(s.metric, metric...)
+// Index is the list of index names; use `_all` or empty string to perform
+func (s *IndicesStatsService) Index(index ...string) *IndicesStatsService {
+// Level returns stats aggregated at cluster, index or shard level.
+func (s *IndicesStatsService) Level(level string) *IndicesStatsService {
+// Types is a list of document types for the `indexing` index metric.
+func (s *IndicesStatsService) Types(types ...string) *IndicesStatsService {
+// CompletionFields is a list of fields for `fielddata` and `suggest`
+// index metric (supports wildcards).
+func (s *IndicesStatsService) CompletionFields(completionFields ...string) *IndicesStatsService {
+ s.completionFields = append(s.completionFields, completionFields...)
+// FielddataFields is a list of fields for `fielddata` index metric (supports wildcards).
+func (s *IndicesStatsService) FielddataFields(fielddataFields ...string) *IndicesStatsService {
+ s.fielddataFields = append(s.fielddataFields, fielddataFields...)
+// Fields is a list of fields for `fielddata` and `completion` index metric
+// (supports wildcards).
+func (s *IndicesStatsService) Fields(fields ...string) *IndicesStatsService {
+// Groups is a list of search groups for `search` index metric.
+func (s *IndicesStatsService) Groups(groups ...string) *IndicesStatsService {
+ s.groups = append(s.groups, groups...)
+// Human indicates whether to return time and byte values in human-readable format..
+func (s *IndicesStatsService) Human(human bool) *IndicesStatsService {
+ s.human = &human
+func (s *IndicesStatsService) Pretty(pretty bool) *IndicesStatsService {
+func (s *IndicesStatsService) buildURL() (string, url.Values, error) {
+ if len(s.index) > 0 && len(s.metric) > 0 {
+ path, err = uritemplates.Expand("/{index}/_stats/{metric}", map[string]string{
+ "metric": strings.Join(s.metric, ","),
+ } else if len(s.index) > 0 {
+ path, err = uritemplates.Expand("/{index}/_stats", map[string]string{
+ } else if len(s.metric) > 0 {
+ path, err = uritemplates.Expand("/_stats/{metric}", map[string]string{
+ path = "/_stats"
+ if len(s.groups) > 0 {
+ params.Set("groups", strings.Join(s.groups, ","))
+ if s.human != nil {
+ params.Set("human", fmt.Sprintf("%v", *s.human))
+ if len(s.types) > 0 {
+ params.Set("types", strings.Join(s.types, ","))
+ if len(s.completionFields) > 0 {
+ params.Set("completion_fields", strings.Join(s.completionFields, ","))
+ if len(s.fielddataFields) > 0 {
+ params.Set("fielddata_fields", strings.Join(s.fielddataFields, ","))
+func (s *IndicesStatsService) Validate() error {
+func (s *IndicesStatsService) Do() (*IndicesStatsResponse, error) {
+ ret := new(IndicesStatsResponse)
+// IndicesStatsResponse is the response of IndicesStatsService.Do.
+type IndicesStatsResponse struct {
+ // Shards provides information returned from shards.
+ // All provides summary stats about all indices.
+ All *IndexStats `json:"_all,omitempty"`
+ // Indices provides a map into the stats of an index. The key of the
+ // map is the index name.
+ Indices map[string]*IndexStats `json:"indices,omitempty"`
+// IndexStats is index stats for a specific index.
+type IndexStats struct {
+ Primaries *IndexStatsDetails `json:"primaries,omitempty"`
+ Total *IndexStatsDetails `json:"total,omitempty"`
+type IndexStatsDetails struct {
+ Docs *IndexStatsDocs `json:"docs,omitempty"`
+ Store *IndexStatsStore `json:"store,omitempty"`
+ Indexing *IndexStatsIndexing `json:"indexing,omitempty"`
+ Get *IndexStatsGet `json:"get,omitempty"`
+ Search *IndexStatsSearch `json:"search,omitempty"`
+ Merges *IndexStatsMerges `json:"merges,omitempty"`
+ Refresh *IndexStatsRefresh `json:"refresh,omitempty"`
+ Flush *IndexStatsFlush `json:"flush,omitempty"`
+ Warmer *IndexStatsWarmer `json:"warmer,omitempty"`
+ FilterCache *IndexStatsFilterCache `json:"filter_cache,omitempty"`
+ IdCache *IndexStatsIdCache `json:"id_cache,omitempty"`
+ Fielddata *IndexStatsFielddata `json:"fielddata,omitempty"`
+ Percolate *IndexStatsPercolate `json:"percolate,omitempty"`
+ Completion *IndexStatsCompletion `json:"completion,omitempty"`
+ Segments *IndexStatsSegments `json:"segments,omitempty"`
+ Translog *IndexStatsTranslog `json:"translog,omitempty"`
+ Suggest *IndexStatsSuggest `json:"suggest,omitempty"`
+ QueryCache *IndexStatsQueryCache `json:"query_cache,omitempty"`
+type IndexStatsDocs struct {
+ Count int64 `json:"count,omitempty"`
+ Deleted int64 `json:"deleted,omitempty"`
+type IndexStatsStore struct {
+ Size string `json:"size,omitempty"` // human size, e.g. 119.3mb
+ SizeInBytes int64 `json:"size_in_bytes,omitempty"`
+ ThrottleTime string `json:"throttle_time,omitempty"` // human time, e.g. 0s
+ ThrottleTimeInMillis int64 `json:"throttle_time_in_millis,omitempty"`
+type IndexStatsIndexing struct {
+ IndexTotal int64 `json:"index_total,omitempty"`
+ IndexTime string `json:"index_time,omitempty"`
+ IndexTimeInMillis int64 `json:"index_time_in_millis,omitempty"`
+ IndexCurrent int64 `json:"index_current,omitempty"`
+ DeleteTotal int64 `json:"delete_total,omitempty"`
+ DeleteTime string `json:"delete_time,omitempty"`
+ DeleteTimeInMillis int64 `json:"delete_time_in_millis,omitempty"`
+ DeleteCurrent int64 `json:"delete_current,omitempty"`
+ NoopUpdateTotal int64 `json:"noop_update_total,omitempty"`
+ IsThrottled bool `json:"is_throttled,omitempty"`
+ ThrottleTime string `json:"throttle_time,omitempty"`
+type IndexStatsGet struct {
+ Total int64 `json:"total,omitempty"`
+ GetTime string `json:"get_time,omitempty"`
+ TimeInMillis int64 `json:"time_in_millis,omitempty"`
+ ExistsTotal int64 `json:"exists_total,omitempty"`
+ ExistsTime string `json:"exists_time,omitempty"`
+ ExistsTimeInMillis int64 `json:"exists_time_in_millis,omitempty"`
+ MissingTotal int64 `json:"missing_total,omitempty"`
+ MissingTime string `json:"missing_time,omitempty"`
+ MissingTimeInMillis int64 `json:"missing_time_in_millis,omitempty"`
+ Current int64 `json:"current,omitempty"`
+type IndexStatsSearch struct {
+ OpenContexts int64 `json:"open_contexts,omitempty"`
+ QueryTotal int64 `json:"query_total,omitempty"`
+ QueryTime string `json:"query_time,omitempty"`
+ QueryTimeInMillis int64 `json:"query_time_in_millis,omitempty"`
+ QueryCurrent int64 `json:"query_current,omitempty"`
+ FetchTotal int64 `json:"fetch_total,omitempty"`
+ FetchTime string `json:"fetch_time,omitempty"`
+ FetchTimeInMillis int64 `json:"fetch_time_in_millis,omitempty"`
+ FetchCurrent int64 `json:"fetch_current,omitempty"`
+type IndexStatsMerges struct {
+ CurrentDocs int64 `json:"current_docs,omitempty"`
+ CurrentSize string `json:"current_size,omitempty"`
+ CurrentSizeInBytes int64 `json:"current_size_in_bytes,omitempty"`
+ TotalTime string `json:"total_time,omitempty"`
+ TotalTimeInMillis int64 `json:"total_time_in_millis,omitempty"`
+ TotalDocs int64 `json:"total_docs,omitempty"`
+ TotalSize string `json:"total_size,omitempty"`
+ TotalSizeInBytes int64 `json:"total_size_in_bytes,omitempty"`
+type IndexStatsRefresh struct {
+type IndexStatsFlush struct {
+type IndexStatsWarmer struct {
+type IndexStatsFilterCache struct {
+ MemorySize string `json:"memory_size,omitempty"`
+ MemorySizeInBytes int64 `json:"memory_size_in_bytes,omitempty"`
+ Evictions int64 `json:"evictions,omitempty"`
+type IndexStatsIdCache struct {
+type IndexStatsFielddata struct {
+type IndexStatsPercolate struct {
+ Queries int64 `json:"queries,omitempty"`
+type IndexStatsCompletion struct {
+ Size string `json:"size,omitempty"`
+type IndexStatsSegments struct {
+ Memory string `json:"memory,omitempty"`
+ MemoryInBytes int64 `json:"memory_in_bytes,omitempty"`
+ IndexWriterMemory string `json:"index_writer_memory,omitempty"`
+ IndexWriterMemoryInBytes int64 `json:"index_writer_memory_in_bytes,omitempty"`
+ IndexWriterMaxMemory string `json:"index_writer_max_memory,omitempty"`
+ IndexWriterMaxMemoryInBytes int64 `json:"index_writer_max_memory_in_bytes,omitempty"`
+ VersionMapMemory string `json:"version_map_memory,omitempty"`
+ VersionMapMemoryInBytes int64 `json:"version_map_memory_in_bytes,omitempty"`
+ FixedBitSetMemory string `json:"fixed_bit_set,omitempty"`
+ FixedBitSetMemoryInBytes int64 `json:"fixed_bit_set_memory_in_bytes,omitempty"`
+type IndexStatsTranslog struct {
+ Operations int64 `json:"operations,omitempty"`
+type IndexStatsSuggest struct {
+ Time string `json:"time,omitempty"`
+type IndexStatsQueryCache struct {
+ HitCount int64 `json:"hit_count,omitempty"`
+ MissCount int64 `json:"miss_count,omitempty"`
@@ -0,0 +1,85 @@
+func TestIndexStatsBuildURL(t *testing.T) {
+ Metrics []string
+ "/_stats",
+ "/index1/_stats",
+ []string{"metric1"},
+ "/_stats/metric1",
+ "/index1/_stats/metric1",
+ "/index1%2Cindex2/_stats/metric1",
+ []string{"metric1", "metric2"},
+ "/index1%2Cindex2/_stats/metric1%2Cmetric2",
+ path, _, err := client.IndexStats().Index(test.Indices...).Metric(test.Metrics...).buildURL()
+func TestIndexStats(t *testing.T) {
+ client := setupTestClientAndCreateIndexAndAddDocs(t)
+ stats, err := client.IndexStats(testIndexName).Do()
+ if stats == nil {
+ t.Fatalf("expected response; got: %v", stats)
+ stat, found := stats.Indices[testIndexName]
+ t.Fatalf("expected stats about index %q; got: %v", testIndexName, found)
+ if stat.Total == nil {
+ t.Fatalf("expected total to be != nil; got: %v", stat.Total)
+ if stat.Total.Docs == nil {
+ t.Fatalf("expected total docs to be != nil; got: %v", stat.Total.Docs)
+ if stat.Total.Docs.Count == 0 {
+ t.Fatalf("expected total docs count to be > 0; got: %d", stat.Total.Docs.Count)
@@ -0,0 +1,194 @@
+type MultiGetService struct {
+ items []*MultiGetItem
+func NewMultiGetService(client *Client) *MultiGetService {
+ builder := &MultiGetService{
+ items: make([]*MultiGetItem, 0),
+func (b *MultiGetService) Preference(preference string) *MultiGetService {
+func (b *MultiGetService) Refresh(refresh bool) *MultiGetService {
+func (b *MultiGetService) Realtime(realtime bool) *MultiGetService {
+func (b *MultiGetService) Add(items ...*MultiGetItem) *MultiGetService {
+ b.items = append(b.items, items...)
+func (b *MultiGetService) Source() interface{} {
+ items := make([]interface{}, len(b.items))
+ for i, item := range b.items {
+ items[i] = item.Source()
+ source["docs"] = items
+func (b *MultiGetService) Do() (*MultiGetResult, error) {
+ path := "/_mget"
+ body := b.Source()
+ res, err := b.client.PerformRequest("GET", path, params, body)
+ ret := new(MultiGetResult)
+// -- Multi Get Item --
+// MultiGetItem is a single document to retrieve via the MultiGetService.
+type MultiGetItem struct {
+ version *int64 // see org.elasticsearch.common.lucene.uid.Versions
+ versionType string // see org.elasticsearch.index.VersionType
+func NewMultiGetItem() *MultiGetItem {
+ return &MultiGetItem{}
+func (item *MultiGetItem) Index(index string) *MultiGetItem {
+ item.index = index
+ return item
+func (item *MultiGetItem) Type(typ string) *MultiGetItem {
+ item.typ = typ
+func (item *MultiGetItem) Id(id string) *MultiGetItem {
+ item.id = id
+func (item *MultiGetItem) Routing(routing string) *MultiGetItem {
+ item.routing = routing
+func (item *MultiGetItem) Fields(fields ...string) *MultiGetItem {
+ if item.fields == nil {
+ item.fields = make([]string, 0)
+ item.fields = append(item.fields, fields...)
+// Version can be MatchAny (-3), MatchAnyPre120 (0), NotFound (-1),
+// or NotSet (-2). These are specified in org.elasticsearch.common.lucene.uid.Versions.
+// The default in Elasticsearch is MatchAny (-3).
+func (item *MultiGetItem) Version(version int64) *MultiGetItem {
+ item.version = &version
+// VersionType can be "internal", "external", "external_gt", "external_gte",
+// or "force". See org.elasticsearch.index.VersionType in Elasticsearch source.
+// It is "internal" by default.
+func (item *MultiGetItem) VersionType(versionType string) *MultiGetItem {
+ item.versionType = versionType
+func (item *MultiGetItem) FetchSource(fetchSourceContext *FetchSourceContext) *MultiGetItem {
+ item.fsc = fetchSourceContext
+// Source returns the serialized JSON to be sent to Elasticsearch as
+// part of a MultiGet search.
+func (item *MultiGetItem) Source() interface{} {
+ source["_id"] = item.id
+ if item.index != "" {
+ source["_index"] = item.index
+ if item.typ != "" {
+ source["_type"] = item.typ
+ if item.fsc != nil {
+ source["_source"] = item.fsc.Source()
+ if item.fields != nil {
+ source["_fields"] = item.fields
+ if item.routing != "" {
+ source["_routing"] = item.routing
+ if item.version != nil {
+ source["version"] = fmt.Sprintf("%d", *item.version)
+ if item.versionType != "" {
+ source["version_type"] = item.versionType
+// -- Result of a Multi Get request.
+type MultiGetResult struct {
+ Docs []*GetResult `json:"docs,omitempty"`
@@ -0,0 +1,95 @@
+func TestMultiGet(t *testing.T) {
+ // Add some documents
+ // Get documents 1 and 3
+ res, err := client.MultiGet().
+ Add(NewMultiGetItem().Index(testIndexName).Type("tweet").Id("1")).
+ Add(NewMultiGetItem().Index(testIndexName).Type("tweet").Id("3")).
+ t.Fatal("expected result to be != nil; got nil")
+ if res.Docs == nil {
+ t.Fatal("expected result docs to be != nil; got nil")
+ if len(res.Docs) != 2 {
+ t.Fatalf("expected to have 2 docs; got %d", len(res.Docs))
+ item := res.Docs[0]
+ if item.Error != "" {
+ t.Errorf("expected no error on item 0; got %q", item.Error)
+ if item.Source == nil {
+ t.Errorf("expected Source != nil; got %v", item.Source)
+ var doc tweet
+ if err := json.Unmarshal(*item.Source, &doc); err != nil {
+ t.Fatalf("expected to unmarshal item Source; got %v", err)
+ if doc.Message != tweet1.Message {
+ t.Errorf("expected Message of first tweet to be %q; got %q", tweet1.Message, doc.Message)
+ item = res.Docs[1]
+ t.Errorf("expected no error on item 1; got %q", item.Error)
+ if doc.Message != tweet3.Message {
+ t.Errorf("expected Message of second tweet to be %q; got %q", tweet3.Message, doc.Message)
@@ -0,0 +1,101 @@
+// MultiSearch executes one or more searches in one roundtrip.
+// See http://www.elasticsearch.org/guide/reference/api/multi-search/
+type MultiSearchService struct {
+ requests []*SearchRequest
+func NewMultiSearchService(client *Client) *MultiSearchService {
+ builder := &MultiSearchService{
+ requests: make([]*SearchRequest, 0),
+func (s *MultiSearchService) Add(requests ...*SearchRequest) *MultiSearchService {
+ s.requests = append(s.requests, requests...)
+func (s *MultiSearchService) Index(index string) *MultiSearchService {
+func (s *MultiSearchService) Indices(indices ...string) *MultiSearchService {
+func (s *MultiSearchService) Pretty(pretty bool) *MultiSearchService {
+func (s *MultiSearchService) Do() (*MultiSearchResult, error) {
+ path := "/_msearch"
+ lines := make([]string, 0)
+ for _, sr := range s.requests {
+ // Set default indices if not specified in the request
+ if !sr.HasIndices() && len(s.indices) > 0 {
+ sr = sr.Indices(s.indices...)
+ header, err := json.Marshal(sr.header())
+ body, err := json.Marshal(sr.body())
+ lines = append(lines, string(header))
+ lines = append(lines, string(body))
+ body := strings.Join(lines, "\n") + "\n" // Don't forget trailing \n
+ ret := new(MultiSearchResult)
+type MultiSearchResult struct {
+ Responses []*SearchResult `json:"responses,omitempty"`
@@ -0,0 +1,197 @@
+func TestMultiSearch(t *testing.T) {
+ tweet1 := tweet{
+ User: "olivere",
+ Message: "Welcome to Golang and Elasticsearch.",
+ Tags: []string{"golang", "elasticsearch"},
+ tweet2 := tweet{
+ Message: "Another unrelated topic.",
+ Tags: []string{"golang"},
+ tweet3 := tweet{
+ User: "sandrae",
+ Message: "Cycling is fun.",
+ Tags: []string{"sports", "cycling"},
+ // Spawn two search queries with one roundtrip
+ q1 := NewMatchAllQuery()
+ q2 := NewTermQuery("tags", "golang")
+ sreq1 := NewSearchRequest().Indices(testIndexName, testIndexName2).
+ Source(NewSearchSource().Query(q1).Size(10))
+ sreq2 := NewSearchRequest().Index(testIndexName).Type("tweet").
+ Source(NewSearchSource().Query(q2))
+ searchResult, err := client.MultiSearch().
+ Add(sreq1, sreq2).
+ if searchResult.Responses == nil {
+ t.Fatal("expected responses != nil; got nil")
+ if len(searchResult.Responses) != 2 {
+ t.Fatalf("expected 2 responses; got %d", len(searchResult.Responses))
+ sres := searchResult.Responses[0]
+ if sres.Hits == nil {
+ t.Errorf("expected Hits != nil; got nil")
+ if sres.Hits.TotalHits != 3 {
+ t.Errorf("expected Hits.TotalHits = %d; got %d", 3, sres.Hits.TotalHits)
+ if len(sres.Hits.Hits) != 3 {
+ t.Errorf("expected len(Hits.Hits) = %d; got %d", 3, len(sres.Hits.Hits))
+ for _, hit := range sres.Hits.Hits {
+ if hit.Index != testIndexName {
+ t.Errorf("expected Hits.Hit.Index = %q; got %q", testIndexName, hit.Index)
+ item := make(map[string]interface{})
+ err := json.Unmarshal(*hit.Source, &item)
+ sres = searchResult.Responses[1]
+ if sres.Hits.TotalHits != 2 {
+ t.Errorf("expected Hits.TotalHits = %d; got %d", 2, sres.Hits.TotalHits)
+ if len(sres.Hits.Hits) != 2 {
+ t.Errorf("expected len(Hits.Hits) = %d; got %d", 2, len(sres.Hits.Hits))
+func TestMultiSearchWithOneRequest(t *testing.T) {
+ query := NewMatchAllQuery()
+ source := NewSearchSource().Query(query).Size(10)
+ sreq := NewSearchRequest().Source(source)
+ Add(sreq).
+ if len(searchResult.Responses) != 1 {
+ t.Fatalf("expected 1 responses; got %d", len(searchResult.Responses))
@@ -0,0 +1,311 @@
+// NodesInfoService allows to retrieve one or more or all of the
+// cluster nodes information.
+// It is documented at http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/cluster-nodes-info.html.
+type NodesInfoService struct {
+ nodeId []string
+// NewNodesInfoService creates a new NodesInfoService.
+func NewNodesInfoService(client *Client) *NodesInfoService {
+ return &NodesInfoService{
+ nodeId: []string{"_all"},
+ metric: []string{"_all"},
+// NodeId is a list of node IDs or names to limit the returned information.
+// Use "_local" to return information from the node you're connecting to,
+// leave empty to get information from all nodes.
+func (s *NodesInfoService) NodeId(nodeId ...string) *NodesInfoService {
+ s.nodeId = make([]string, 0)
+ s.nodeId = append(s.nodeId, nodeId...)
+// Metric is a list of metrics you wish returned. Leave empty to return all.
+// Valid metrics are: settings, os, process, jvm, thread_pool, network,
+// transport, http, and plugins.
+func (s *NodesInfoService) Metric(metric ...string) *NodesInfoService {
+ s.metric = make([]string, 0)
+// FlatSettings returns settings in flat format (default: false).
+func (s *NodesInfoService) FlatSettings(flatSettings bool) *NodesInfoService {
+// Human indicates whether to return time and byte values in human-readable format.
+func (s *NodesInfoService) Human(human bool) *NodesInfoService {
+// Pretty indicates whether to indent the returned JSON.
+func (s *NodesInfoService) Pretty(pretty bool) *NodesInfoService {
+func (s *NodesInfoService) buildURL() (string, url.Values, error) {
+ path, err := uritemplates.Expand("/_nodes/{node_id}/{metric}", map[string]string{
+ "node_id": strings.Join(s.nodeId, ","),
+func (s *NodesInfoService) Validate() error {
+func (s *NodesInfoService) Do() (*NodesInfoResponse, error) {
+ ret := new(NodesInfoResponse)
+// NodesInfoResponse is the response of NodesInfoService.Do.
+type NodesInfoResponse struct {
+ Nodes map[string]*NodesInfoNode `json:"nodes"`
+type NodesInfoNode struct {
+ // Name of the node, e.g. "Mister Fear"
+ Name string `json:"name"`
+ // TransportAddress, e.g. "inet[/127.0.0.1:9300]"
+ TransportAddress string `json:"transport_address"`
+ // Host is the host name, e.g. "macbookair"
+ Host string `json:"host"`
+ // IP is the IP address, e.g. "192.168.1.2"
+ IP string `json:"ip"`
+ // Version is the Elasticsearch version running on the node, e.g. "1.4.3"
+ Version string `json:"version"`
+ // Build is the Elasticsearch build, e.g. "36a29a7"
+ Build string `json:"build"`
+ // HTTPAddress, e.g. "inet[/127.0.0.1:9200]"
+ HTTPAddress string `json:"http_address"`
+ // HTTPSAddress, e.g. "inet[/127.0.0.1:9200]"
+ HTTPSAddress string `json:"https_address"`
+ // Settings of the node, e.g. paths and pidfile.
+ // OS information, e.g. CPU and memory.
+ OS *NodesInfoNodeOS `json:"os"`
+ // Process information, e.g. max file descriptors.
+ Process *NodesInfoNodeProcess `json:"process"`
+ // JVM information, e.g. VM version.
+ JVM *NodesInfoNodeProcess `json:"jvm"`
+ // ThreadPool information.
+ ThreadPool *NodesInfoNodeThreadPool `json:"thread_pool"`
+ // Network information.
+ Network *NodesInfoNodeNetwork `json:"network"`
+ Transport *NodesInfoNodeTransport `json:"transport"`
+ // HTTP information.
+ HTTP *NodesInfoNodeHTTP `json:"http"`
+ // Plugins information.
+ Plugins []*NodesInfoNodePlugin `json:"plugins"`
+type NodesInfoNodeOS struct {
+ RefreshInterval string `json:"refresh_interval"` // e.g. 1s
+ RefreshIntervalInMillis int `json:"refresh_interval_in_millis"` // e.g. 1000
+ AvailableProcessors int `json:"available_processors"` // e.g. 4
+ // CPU information
+ CPU struct {
+ Vendor string `json:"vendor"` // e.g. Intel
+ Model string `json:"model"` // e.g. iMac15,1
+ MHz int `json:"mhz"` // e.g. 3500
+ TotalCores int `json:"total_cores"` // e.g. 4
+ TotalSockets int `json:"total_sockets"` // e.g. 4
+ CoresPerSocket int `json:"cores_per_socket"` // e.g. 16
+ CacheSizeInBytes int `json:"cache_size_in_bytes"` // e.g. 256
+ } `json:"cpu"`
+ // Mem information
+ Mem struct {
+ Total string `json:"total"` // e.g. 16gb
+ TotalInBytes int `json:"total_in_bytes"` // e.g. 17179869184
+ } `json:"mem"`
+ // Swap information
+ Swap struct {
+ Total string `json:"total"` // e.g. 1gb
+ TotalInBytes int `json:"total_in_bytes"` // e.g. 1073741824
+ } `json:"swap"`
+type NodesInfoNodeProcess struct {
+ ID int `json:"id"` // process id, e.g. 87079
+ MaxFileDescriptors int `json:"max_file_descriptors"` // e.g. 32768
+ Mlockall bool `json:"mlockall"` // e.g. false
+type NodesInfoNodeJVM struct {
+ PID int `json:"pid"` // process id, e.g. 87079
+ Version string `json:"version"` // e.g. "1.8.0_25"
+ VMName string `json:"vm_name"` // e.g. "Java HotSpot(TM) 64-Bit Server VM"
+ VMVersion string `json:"vm_version"` // e.g. "25.25-b02"
+ VMVendor string `json:"vm_vendor"` // e.g. "Oracle Corporation"
+ StartTime time.Time `json:"start_time"` // e.g. "2015-01-03T15:18:30.982Z"
+ StartTimeInMillis int64 `json:"start_time_in_millis"`
+ HeapInit string `json:"heap_init"` // e.g. 1gb
+ HeapInitInBytes int `json:"heap_init_in_bytes"`
+ HeapMax string `json:"heap_max"` // e.g. 4gb
+ HeapMaxInBytes int `json:"heap_max_in_bytes"`
+ NonHeapInit string `json:"non_heap_init"` // e.g. 2.4mb
+ NonHeapInitInBytes int `json:"non_heap_init_in_bytes"`
+ NonHeapMax string `json:"non_heap_max"` // e.g. 0b
+ NonHeapMaxInBytes int `json:"non_heap_max_in_bytes"`
+ DirectMax string `json:"direct_max"` // e.g. 4gb
+ DirectMaxInBytes int `json:"direct_max_in_bytes"`
+ GCCollectors []string `json:"gc_collectors"` // e.g. ["ParNew"]
+ MemoryPools []string `json:"memory_pools"` // e.g. ["Code Cache", "Metaspace"]
+type NodesInfoNodeThreadPool struct {
+ Percolate *NodesInfoNodeThreadPoolSection `json:"percolate"`
+ Bench *NodesInfoNodeThreadPoolSection `json:"bench"`
+ Listener *NodesInfoNodeThreadPoolSection `json:"listener"`
+ Index *NodesInfoNodeThreadPoolSection `json:"index"`
+ Refresh *NodesInfoNodeThreadPoolSection `json:"refresh"`
+ Suggest *NodesInfoNodeThreadPoolSection `json:"suggest"`
+ Generic *NodesInfoNodeThreadPoolSection `json:"generic"`
+ Warmer *NodesInfoNodeThreadPoolSection `json:"warmer"`
+ Search *NodesInfoNodeThreadPoolSection `json:"search"`
+ Flush *NodesInfoNodeThreadPoolSection `json:"flush"`
+ Optimize *NodesInfoNodeThreadPoolSection `json:"optimize"`
+ Management *NodesInfoNodeThreadPoolSection `json:"management"`
+ Get *NodesInfoNodeThreadPoolSection `json:"get"`
+ Merge *NodesInfoNodeThreadPoolSection `json:"merge"`
+ Bulk *NodesInfoNodeThreadPoolSection `json:"bulk"`
+ Snapshot *NodesInfoNodeThreadPoolSection `json:"snapshot"`
+type NodesInfoNodeThreadPoolSection struct {
+ Type string `json:"type"` // e.g. fixed
+ Min int `json:"min"` // e.g. 4
+ Max int `json:"max"` // e.g. 4
+ KeepAlive string `json:"keep_alive"` // e.g. "5m"
+ QueueSize interface{} `json:"queue_size"` // e.g. "1k" or -1
+type NodesInfoNodeNetwork struct {
+ PrimaryInterface struct {
+ Address string `json:"address"` // e.g. 192.168.1.2
+ Name string `json:"name"` // e.g. en0
+ MACAddress string `json:"mac_address"` // e.g. 11:22:33:44:55:66
+ } `json:"primary_interface"`
+type NodesInfoNodeTransport struct {
+ BoundAddress string `json:"bound_address"` // e.g. inet[/127.0.0.1:9300]
+ PublishAddress string `json:"publish_address"` // e.g. inet[/127.0.0.1:9300]
+type NodesInfoNodeHTTP struct {
+ MaxContentLength string `json:"max_content_length"` // e.g. "100mb"
+ MaxContentLengthInBytes int64 `json:"max_content_length_in_bytes"`
+type NodesInfoNodePlugin struct {
+ Description string `json:"description"`
+ Site bool `json:"site"`
+ JVM bool `json:"jvm"`
+ URL string `json:"url"` // e.g. /_plugin/dummy/
+func TestNodesInfo(t *testing.T) {
+ info, err := client.NodesInfo().Do()
+ t.Fatal("expected nodes info")
+ if info.ClusterName == "" {
+ t.Errorf("expected cluster name; got: %q", info.ClusterName)
+ if len(info.Nodes) == 0 {
+ t.Errorf("expected some nodes; got: %d", len(info.Nodes))
+ for id, node := range info.Nodes {
+ if id == "" {
+ t.Errorf("expected node id; got: %q", id)
+ if node == nil {
+ t.Fatalf("expected node info; got: %v", node)
+ if node.IP == "" {
+ t.Errorf("expected node IP; got: %q", node.IP)
@@ -0,0 +1,135 @@
+type OptimizeService struct {
+ maxNumSegments *int
+ onlyExpungeDeletes *bool
+ flush *bool
+ waitForMerge *bool
+func NewOptimizeService(client *Client) *OptimizeService {
+ builder := &OptimizeService{
+func (s *OptimizeService) Index(index string) *OptimizeService {
+func (s *OptimizeService) Indices(indices ...string) *OptimizeService {
+func (s *OptimizeService) MaxNumSegments(maxNumSegments int) *OptimizeService {
+ s.maxNumSegments = &maxNumSegments
+func (s *OptimizeService) OnlyExpungeDeletes(onlyExpungeDeletes bool) *OptimizeService {
+ s.onlyExpungeDeletes = &onlyExpungeDeletes
+func (s *OptimizeService) Flush(flush bool) *OptimizeService {
+ s.flush = &flush
+func (s *OptimizeService) WaitForMerge(waitForMerge bool) *OptimizeService {
+ s.waitForMerge = &waitForMerge
+func (s *OptimizeService) Force(force bool) *OptimizeService {
+func (s *OptimizeService) Pretty(pretty bool) *OptimizeService {
+func (s *OptimizeService) Do() (*OptimizeResult, error) {
+ path += "/_optimize"
+ if s.maxNumSegments != nil {
+ params.Set("max_num_segments", fmt.Sprintf("%d", *s.maxNumSegments))
+ if s.onlyExpungeDeletes != nil {
+ params.Set("only_expunge_deletes", fmt.Sprintf("%v", *s.onlyExpungeDeletes))
+ if s.flush != nil {
+ params.Set("flush", fmt.Sprintf("%v", *s.flush))
+ if s.waitForMerge != nil {
+ params.Set("wait_for_merge", fmt.Sprintf("%v", *s.waitForMerge))
+ ret := new(OptimizeResult)
+// -- Result of an optimize request.
+type OptimizeResult struct {
+func TestOptimize(t *testing.T) {
+ // Optimize documents
+ res, err := client.Optimize(testIndexName, testIndexName2).Do()
+ t.Fatal("expected result; got nil")
+// PercolateService is documented at http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/search-percolate.html.
+type PercolateService struct {
+ routing []string
+ percolateIndex string
+ percolatePreference string
+ percolateRouting string
+ percolateFormat string
+ percolateType string
+// NewPercolateService creates a new PercolateService.
+func NewPercolateService(client *Client) *PercolateService {
+ return &PercolateService{
+ routing: make([]string, 0),
+// Index is the name of the index of the document being percolated.
+func (s *PercolateService) Index(index string) *PercolateService {
+// Type is the type of the document being percolated.
+func (s *PercolateService) Type(typ string) *PercolateService {
+// Id is to substitute the document in the request body with a
+// document that is known by the specified id. On top of the id,
+// the index and type parameter will be used to retrieve
+// the document from within the cluster.
+func (s *PercolateService) Id(id string) *PercolateService {
+// ExpandWildcards indicates whether to expand wildcard expressions
+func (s *PercolateService) ExpandWildcards(expandWildcards string) *PercolateService {
+// PercolateFormat indicates whether to return an array of matching
+// query IDs instead of objects.
+func (s *PercolateService) PercolateFormat(percolateFormat string) *PercolateService {
+ s.percolateFormat = percolateFormat
+// PercolateType is the type to percolate document into. Defaults to type.
+func (s *PercolateService) PercolateType(percolateType string) *PercolateService {
+ s.percolateType = percolateType
+// PercolateRouting is the routing value to use when percolating
+// the existing document.
+func (s *PercolateService) PercolateRouting(percolateRouting string) *PercolateService {
+ s.percolateRouting = percolateRouting
+// Source is the URL-encoded request definition.
+func (s *PercolateService) Source(source string) *PercolateService {
+func (s *PercolateService) AllowNoIndices(allowNoIndices bool) *PercolateService {
+func (s *PercolateService) IgnoreUnavailable(ignoreUnavailable bool) *PercolateService {
+// PercolateIndex is the index to percolate the document into. Defaults to index.
+func (s *PercolateService) PercolateIndex(percolateIndex string) *PercolateService {
+ s.percolateIndex = percolateIndex
+// PercolatePreference defines which shard to prefer when executing
+// the percolate request.
+func (s *PercolateService) PercolatePreference(percolatePreference string) *PercolateService {
+ s.percolatePreference = percolatePreference
+func (s *PercolateService) Version(version interface{}) *PercolateService {
+// VersionType is the specific version type.
+func (s *PercolateService) VersionType(versionType string) *PercolateService {
+// Routing is a list of specific routing values.
+func (s *PercolateService) Routing(routing []string) *PercolateService {
+// Preference specifies the node or shard the operation should be
+// performed on (default: random).
+func (s *PercolateService) Preference(preference string) *PercolateService {
+func (s *PercolateService) Pretty(pretty bool) *PercolateService {
+// Doc wraps the given document into the "doc" key of the body.
+func (s *PercolateService) Doc(doc interface{}) *PercolateService {
+ return s.BodyJson(map[string]interface{}{"doc": doc})
+// BodyJson is the percolator request definition using the percolate DSL.
+func (s *PercolateService) BodyJson(body interface{}) *PercolateService {
+// BodyString is the percolator request definition using the percolate DSL.
+func (s *PercolateService) BodyString(body string) *PercolateService {
+func (s *PercolateService) buildURL() (string, url.Values, error) {
+ path, err := uritemplates.Expand("/{index}/{type}/_percolate", map[string]string{
+ if len(s.routing) > 0 {
+ params.Set("routing", strings.Join(s.routing, ","))
+ if s.percolateIndex != "" {
+ params.Set("percolate_index", s.percolateIndex)
+ if s.percolatePreference != "" {
+ params.Set("percolate_preference", s.percolatePreference)
+ if s.percolateRouting != "" {
+ params.Set("percolate_routing", s.percolateRouting)
+ if s.percolateFormat != "" {
+ params.Set("percolate_format", s.percolateFormat)
+ if s.percolateType != "" {
+ params.Set("percolate_type", s.percolateType)
+func (s *PercolateService) Validate() error {
+func (s *PercolateService) Do() (*PercolateResponse, error) {
+ ret := new(PercolateResponse)
+// PercolateResponse is the response of PercolateService.Do.
+type PercolateResponse struct {
+ TookInMillis int64 `json:"took"` // search time in milliseconds
+ Total int64 `json:"total"` // total matches
+ Matches []*PercolateMatch `json:"matches,omitempty"`
+ Facets SearchFacets `json:"facets,omitempty"` // results from facets
+ Aggregations Aggregations `json:"aggregations,omitempty"` // results from aggregations
+// PercolateMatch returns a single match in a PercolateResponse.
+type PercolateMatch struct {
+ Score float64 `json:"_score,omitempty"`
@@ -0,0 +1,59 @@
+func TestPercolate(t *testing.T) {
+ client := setupTestClientAndCreateIndex(t) //, SetTraceLog(log.New(os.Stdout, "", 0)))
+ // Register a query in the ".percolator" type.
+ search := NewSearchSource().Query(NewMatchQuery("message", "Golang"))
+ _, err = client.Index().
+ Index(testIndexName).Type(".percolator").Id("1").
+ BodyJson(search.Source()).
+ // Percolate should return our registered query
+ newTweet := tweet{User: "olivere", Message: "Golang is fun."}
+ res, err := client.Percolate().
+ Index(testIndexName).Type("tweet").
+ Doc(newTweet). // shortcut for: BodyJson(map[string]interface{}{"doc": newTweet}).
+ Pretty(true).
+ if res.Total != 1 {
+ t.Fatalf("expected 1 result; got: %d", res.Total)
+ if res.Matches == nil {
+ t.Fatalf("expected Matches; got: %v", res.Matches)
+ matches := res.Matches
+ if matches == nil {
+ t.Fatalf("expected matches as map; got: %v", matches)
+ if len(matches) != 1 {
+ t.Fatalf("expected %d registered matches; got: %d", 1, len(matches))
+ if matches[0].Id != "1" {
+ t.Errorf("expected to return query %q; got: %q", "1", matches[0].Id)
@@ -0,0 +1,117 @@
+// PingService checks if an Elasticsearch server on a given URL is alive.
+// When asked for, it can also return various information about the
+// Elasticsearch server, e.g. the Elasticsearch version number.
+// Ping simply starts a HTTP GET request to the URL of the server.
+// If the server responds with HTTP Status code 200 OK, the server is alive.
+type PingService struct {
+ httpHeadOnly bool
+// PingResult is the result returned from querying the Elasticsearch server.
+type PingResult struct {
+ Version struct {
+ Number string `json:"number"`
+ BuildHash string `json:"build_hash"`
+ BuildTimestamp string `json:"build_timestamp"`
+ BuildSnapshot bool `json:"build_snapshot"`
+ LuceneVersion string `json:"lucene_version"`
+ } `json:"version"`
+ TagLine string `json:"tagline"`
+func NewPingService(client *Client) *PingService {
+ return &PingService{
+ url: DefaultURL,
+ httpHeadOnly: false,
+ pretty: false,
+func (s *PingService) URL(url string) *PingService {
+ s.url = url
+func (s *PingService) Timeout(timeout string) *PingService {
+// HeadOnly makes the service to only return the status code in Do;
+// the PingResult will be nil.
+func (s *PingService) HttpHeadOnly(httpHeadOnly bool) *PingService {
+ s.httpHeadOnly = httpHeadOnly
+func (s *PingService) Pretty(pretty bool) *PingService {
+// Do returns the PingResult, the HTTP status code of the Elasticsearch
+// server, and an error.
+func (s *PingService) Do() (*PingResult, int, error) {
+ url_ := s.url + "/"
+ url_ += "?" + params.Encode()
+ var method string
+ if s.httpHeadOnly {
+ method = "HEAD"
+ method = "GET"
+ // Notice: This service must NOT use PerformRequest!
+ req, err := NewRequest(method, url_)
+ return nil, 0, err
+ res, err := s.client.c.Do((*http.Request)(req))
+ var ret *PingResult
+ if !s.httpHeadOnly {
+ ret = new(PingResult)
+ if err := json.NewDecoder(res.Body).Decode(ret); err != nil {
+ return nil, res.StatusCode, err
+ return ret, res.StatusCode, nil
@@ -0,0 +1,67 @@
+func TestPingGet(t *testing.T) {
+ res, code, err := client.Ping().Do()
+ if code != http.StatusOK {
+ t.Errorf("expected status code = %d; got %d", http.StatusOK, code)
+ t.Fatalf("expected to return result, got: %v", res)
+ if res.Status != http.StatusOK {
+ t.Errorf("expected Status = %d; got %d", http.StatusOK, res.Status)
+ if res.Name == "" {
+ t.Errorf("expected Name != \"\"; got %q", res.Name)
+ if res.Version.Number == "" {
+ t.Errorf("expected Version.Number != \"\"; got %q", res.Version.Number)
+func TestPingHead(t *testing.T) {
+ res, code, err := client.Ping().HttpHeadOnly(true).Do()
+ t.Errorf("expected not to return result, got: %v", res)
+func TestPingHeadFailure(t *testing.T) {
+ res, code, err := client.Ping().
+ URL("http://127.0.0.1:9299").
+ HttpHeadOnly(true).
+ t.Error("expected error, got nil")
+ if code == http.StatusOK {
+ t.Errorf("expected status code != %d; got %d", http.StatusOK, code)
@@ -0,0 +1,222 @@
+// PutMappingService allows to register specific mapping definition
+// for a specific type.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-put-mapping.html.
+type PutMappingService struct {
+ ignoreConflicts *bool
+ bodyJson map[string]interface{}
+// NewPutMappingService creates a new PutMappingService.
+func NewPutMappingService(client *Client) *PutMappingService {
+ return &PutMappingService{
+// Index is a list of index names the mapping should be added to
+// (supports wildcards); use `_all` or omit to add the mapping on all indices.
+func (s *PutMappingService) Index(index ...string) *PutMappingService {
+// Type is the name of the document type.
+func (s *PutMappingService) Type(typ string) *PutMappingService {
+func (s *PutMappingService) Timeout(timeout string) *PutMappingService {
+func (s *PutMappingService) MasterTimeout(masterTimeout string) *PutMappingService {
+func (s *PutMappingService) IgnoreUnavailable(ignoreUnavailable bool) *PutMappingService {
+func (s *PutMappingService) AllowNoIndices(allowNoIndices bool) *PutMappingService {
+func (s *PutMappingService) ExpandWildcards(expandWildcards string) *PutMappingService {
+// IgnoreConflicts specifies whether to ignore conflicts while updating
+// the mapping (default: false).
+func (s *PutMappingService) IgnoreConflicts(ignoreConflicts bool) *PutMappingService {
+ s.ignoreConflicts = &ignoreConflicts
+func (s *PutMappingService) Pretty(pretty bool) *PutMappingService {
+// BodyJson contains the mapping definition.
+func (s *PutMappingService) BodyJson(mapping map[string]interface{}) *PutMappingService {
+ s.bodyJson = mapping
+// BodyString is the mapping definition serialized as a string.
+func (s *PutMappingService) BodyString(mapping string) *PutMappingService {
+ s.bodyString = mapping
+func (s *PutMappingService) buildURL() (string, url.Values, error) {
+ // Build URL: Typ MUST be specified and is verified in Validate.
+ path, err = uritemplates.Expand("/{index}/_mapping/{type}", map[string]string{
+ path, err = uritemplates.Expand("/_mapping/{type}", map[string]string{
+ if s.ignoreConflicts != nil {
+ params.Set("ignore_conflicts", fmt.Sprintf("%v", *s.ignoreConflicts))
+func (s *PutMappingService) Validate() error {
+func (s *PutMappingService) Do() (*PutMappingResponse, error) {
+ ret := new(PutMappingResponse)
+// PutMappingResponse is the response of PutMappingService.Do.
+type PutMappingResponse struct {
@@ -0,0 +1,94 @@
+func TestPutMappingURL(t *testing.T) {
+ "tweet",
+ "/_mapping/tweet",
+ []string{"*"},
+ "/%2A/_mapping/tweet",
+ "/store-1%2Cstore-2/_mapping/tweet",
+ path, _, err := client.PutMapping().Index(test.Indices...).Type(test.Type).buildURL()
+func TestMappingLifecycle(t *testing.T) {
+ mapping := `{
+ "tweetdoc":{
+ "message":{
+ "type":"string",
+ "store":true
+ }`
+ putresp, err := client.PutMapping().Index(testIndexName2).Type("tweetdoc").BodyString(mapping).Do()
+ t.Fatalf("expected put mapping to succeed; got: %v", err)
+ if putresp == nil {
+ t.Fatalf("expected put mapping response; got: %v", putresp)
+ if !putresp.Acknowledged {
+ t.Fatalf("expected put mapping ack; got: %v", putresp.Acknowledged)
+ getresp, err := client.GetMapping().Index(testIndexName2).Type("tweetdoc").Do()
+ t.Fatalf("expected get mapping to succeed; got: %v", err)
+ if getresp == nil {
+ t.Fatalf("expected get mapping response; got: %v", getresp)
+ props, ok := getresp[testIndexName2]
+ t.Fatalf("expected JSON root to be of type map[string]interface{}; got: %#v", props)
+ delresp, err := client.DeleteMapping().Index(testIndexName2).Type("tweetdoc").Do()
+ t.Fatalf("expected delete mapping to succeed; got: %v", err)
+ if delresp == nil {
+ t.Fatalf("expected delete mapping response; got: %v", delresp)
+ if !delresp.Acknowledged {
+ t.Fatalf("expected delete mapping ack; got: %v", delresp.Acknowledged)
+// PutTemplateService creates or updates a search template.
+// The documentation can be found at
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/search-template.html.
+type PutTemplateService struct {
+// NewPutTemplateService creates a new PutTemplateService.
+func NewPutTemplateService(client *Client) *PutTemplateService {
+ return &PutTemplateService{
+func (s *PutTemplateService) Id(id string) *PutTemplateService {
+// OpType is an explicit operation type.
+func (s *PutTemplateService) OpType(opType string) *PutTemplateService {
+ s.opType = opType
+func (s *PutTemplateService) Version(version int) *PutTemplateService {
+func (s *PutTemplateService) VersionType(versionType string) *PutTemplateService {
+// BodyJson is the document as a JSON serializable object.
+func (s *PutTemplateService) BodyJson(body interface{}) *PutTemplateService {
+// BodyString is the document as a string.
+func (s *PutTemplateService) BodyString(body string) *PutTemplateService {
+func (s *PutTemplateService) buildURL() (string, url.Values, error) {
+ if s.opType != "" {
+ params.Set("op_type", s.opType)
+func (s *PutTemplateService) Validate() error {
+func (s *PutTemplateService) Do() (*PutTemplateResponse, error) {
+ ret := new(PutTemplateResponse)
+// PutTemplateResponse is the response of PutTemplateService.Do.
+type PutTemplateResponse struct {