
Merge branch 'master' of http://192.168.3.207:10080/group3/SwordFish_Interface_Service
Pre-commit update merge during auth token filter development

wanghuidong, 4 years ago
Parent
Current commit
cda1db4289
100 files changed, with 15,568 insertions and 26 deletions
  1. 9 16
      api/v1/projects.go
  2. 5 0
      conf/dev/es.toml
  3. 5 0
      conf/dev/mongo.toml
  4. 45 0
      db/es.go
  5. 45 0
      db/mongo.go
  6. 2 1
      db/mysql.go
  7. 2 0
      main.go
  8. 13 2
      service/projectDetail.go
  9. 88 7
      service/projects.go
  10. 329 0
      sword_base/elastic/elasticSim.go
  11. 31 0
      sword_base/elastic/elasticSim_test.go
  12. 1374 0
      sword_base/elastic/elasticutil.go
  13. 99 0
      sword_base/elastic/elasticutil_Index_test.go
  14. 40 0
      sword_base/elastic/elasticutil_senior_test.go
  15. 76 0
      sword_base/elastic/elasticutil_tcp_test.go
  16. 248 0
      sword_base/elastic/elasticutil_test.go
  17. 50 0
      sword_base/elastic/util.go
  18. 785 0
      sword_base/mongodb/mongodb.go
  19. 127 0
      sword_base/mongodb/mongodb_test.go
  20. 27 0
      sword_base/olivere/elastic.v1/CONTRIBUTING.md
  21. 20 0
      sword_base/olivere/elastic.v1/LICENSE
  22. 388 0
      sword_base/olivere/elastic.v1/README.md
  23. 107 0
      sword_base/olivere/elastic.v1/alias.go
  24. 123 0
      sword_base/olivere/elastic.v1/alias_test.go
  25. 160 0
      sword_base/olivere/elastic.v1/aliases.go
  26. 146 0
      sword_base/olivere/elastic.v1/aliases_test.go
  27. 301 0
      sword_base/olivere/elastic.v1/bulk.go
  28. 112 0
      sword_base/olivere/elastic.v1/bulk_delete_request.go
  29. 42 0
      sword_base/olivere/elastic.v1/bulk_delete_request_test.go
  30. 173 0
      sword_base/olivere/elastic.v1/bulk_index_request.go
  31. 63 0
      sword_base/olivere/elastic.v1/bulk_index_request_test.go
  32. 17 0
      sword_base/olivere/elastic.v1/bulk_request.go
  33. 370 0
      sword_base/olivere/elastic.v1/bulk_test.go
  34. 244 0
      sword_base/olivere/elastic.v1/bulk_update_request.go
  35. 79 0
      sword_base/olivere/elastic.v1/bulk_update_request_test.go
  36. 28 0
      sword_base/olivere/elastic.v1/canonicalize.go
  37. 41 0
      sword_base/olivere/elastic.v1/canonicalize_test.go
  38. 96 0
      sword_base/olivere/elastic.v1/clear_scroll.go
  39. 72 0
      sword_base/olivere/elastic.v1/clear_scroll_test.go
  40. 1240 0
      sword_base/olivere/elastic.v1/client.go
  41. 611 0
      sword_base/olivere/elastic.v1/client_test.go
  42. 16 0
      sword_base/olivere/elastic.v1/cluster-test/Makefile
  43. 63 0
      sword_base/olivere/elastic.v1/cluster-test/README.md
  44. 357 0
      sword_base/olivere/elastic.v1/cluster-test/cluster-test.go
  45. 185 0
      sword_base/olivere/elastic.v1/cluster_health.go
  46. 74 0
      sword_base/olivere/elastic.v1/cluster_health_test.go
  47. 192 0
      sword_base/olivere/elastic.v1/cluster_state.go
  48. 92 0
      sword_base/olivere/elastic.v1/cluster_state_test.go
  49. 90 0
      sword_base/olivere/elastic.v1/connection.go
  50. 152 0
      sword_base/olivere/elastic.v1/count.go
  51. 83 0
      sword_base/olivere/elastic.v1/count_test.go
  52. 75 0
      sword_base/olivere/elastic.v1/create_index.go
  53. 26 0
      sword_base/olivere/elastic.v1/decoder.go
  54. 49 0
      sword_base/olivere/elastic.v1/decoder_test.go
  55. 118 0
      sword_base/olivere/elastic.v1/delete.go
  56. 292 0
      sword_base/olivere/elastic.v1/delete_by_query.go
  57. 76 0
      sword_base/olivere/elastic.v1/delete_by_query_test.go
  58. 57 0
      sword_base/olivere/elastic.v1/delete_index.go
  59. 136 0
      sword_base/olivere/elastic.v1/delete_mapping.go
  60. 40 0
      sword_base/olivere/elastic.v1/delete_mapping_test.go
  61. 118 0
      sword_base/olivere/elastic.v1/delete_template.go
  62. 83 0
      sword_base/olivere/elastic.v1/delete_test.go
  63. 51 0
      sword_base/olivere/elastic.v1/doc.go
  64. 48 0
      sword_base/olivere/elastic.v1/errors.go
  65. 45 0
      sword_base/olivere/elastic.v1/errors_test.go
  66. 547 0
      sword_base/olivere/elastic.v1/example_test.go
  67. 71 0
      sword_base/olivere/elastic.v1/exists.go
  68. 329 0
      sword_base/olivere/elastic.v1/explain.go
  69. 41 0
      sword_base/olivere/elastic.v1/explain_test.go
  70. 74 0
      sword_base/olivere/elastic.v1/fetch_source_context.go
  71. 92 0
      sword_base/olivere/elastic.v1/fetch_source_context_test.go
  72. 9 0
      sword_base/olivere/elastic.v1/filter.go
  73. 167 0
      sword_base/olivere/elastic.v1/flush.go
  74. 22 0
      sword_base/olivere/elastic.v1/flush_test.go
  75. 47 0
      sword_base/olivere/elastic.v1/geo_point.go
  76. 24 0
      sword_base/olivere/elastic.v1/geo_point_test.go
  77. 223 0
      sword_base/olivere/elastic.v1/get.go
  78. 172 0
      sword_base/olivere/elastic.v1/get_mapping.go
  79. 50 0
      sword_base/olivere/elastic.v1/get_mapping_test.go
  80. 113 0
      sword_base/olivere/elastic.v1/get_template.go
  81. 51 0
      sword_base/olivere/elastic.v1/get_template_test.go
  82. 165 0
      sword_base/olivere/elastic.v1/get_test.go
  83. 496 0
      sword_base/olivere/elastic.v1/highlight.go
  84. 168 0
      sword_base/olivere/elastic.v1/highlight_test.go
  85. 216 0
      sword_base/olivere/elastic.v1/index.go
  86. 145 0
      sword_base/olivere/elastic.v1/index_close.go
  87. 50 0
      sword_base/olivere/elastic.v1/index_exists.go
  88. 186 0
      sword_base/olivere/elastic.v1/index_get.go
  89. 189 0
      sword_base/olivere/elastic.v1/index_get_settings.go
  90. 81 0
      sword_base/olivere/elastic.v1/index_get_settings_test.go
  91. 84 0
      sword_base/olivere/elastic.v1/index_get_test.go
  92. 146 0
      sword_base/olivere/elastic.v1/index_open.go
  93. 517 0
      sword_base/olivere/elastic.v1/index_test.go
  94. 122 0
      sword_base/olivere/elastic.v1/indices_delete_template.go
  95. 107 0
      sword_base/olivere/elastic.v1/indices_exists_template.go
  96. 68 0
      sword_base/olivere/elastic.v1/indices_exists_template_test.go
  97. 155 0
      sword_base/olivere/elastic.v1/indices_exists_type.go
  98. 121 0
      sword_base/olivere/elastic.v1/indices_exists_type_test.go
  99. 128 0
      sword_base/olivere/elastic.v1/indices_get_template.go
  100. 41 0
      sword_base/olivere/elastic.v1/indices_get_template_test.go

+ 9 - 16
api/v1/projects.go

@@ -1,6 +1,7 @@
 package v1
 
 import (
+	"sfbase/global"
 	"sfis/service"
 
 	"github.com/gin-gonic/gin"
@@ -15,17 +16,18 @@ func ProjectApiRegister(router *gin.Engine) {
 	{
 		routerGroup.POST("/projectList", getProjectsList)
 		routerGroup.POST("/projectDetail", getProjectDetail)
-		routerGroup.POST("/winnerProjectList", winnerProjectList)
 	}
 }
 
 //Get project list endpoint
 func getProjectsList(c *gin.Context) {
-	projectName := c.Query("name")
-	winner := c.Query("winner")
-	time := c.Query("time")
+	projectName := c.PostForm("name")
+	winner := c.PostForm("winner")
+	time := c.PostForm("time")
+	global.Logger.Info("projectName " + projectName)
 	data := &map[string]interface{}{}
 	if projectName != "" || winner != "" {
+		global.Logger.Info("666")
 		data = service.ProjectListData(projectName, winner, time)
 	}
 	c.JSON(200, data)
@@ -33,20 +35,11 @@ func getProjectsList(c *gin.Context) {
 
 //Get project detail
 func getProjectDetail(c *gin.Context) {
-	id := c.Query("id")
-	data := &map[string]interface{}{}
+	id := c.PostForm("id")
+	global.Logger.Info("id "+ id)
+	data := map[string]interface{}{}
 	if id != "" {
 		data = service.ProjectDetailData(id)
 	}
 	c.JSON(200, data)
 }
-
-//Query projects by winning bidder
-func winnerProjectList(c *gin.Context) {
-	winner := c.Query("winner")
-	data := &map[string]interface{}{}
-	if winner != "" {
-		data = service.WinnerProjectListData(winner)
-	}
-	c.JSON(200, data)
-}
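
Since the handlers above now read their parameters with c.PostForm instead of c.Query, callers must send form-encoded POST bodies rather than URL query strings. A minimal client sketch follows; the host, port, and route prefix are assumptions, since the router group path is not shown in this diff.

package main

import (
	"fmt"
	"net/http"
	"net/url"
)

func main() {
	form := url.Values{}
	form.Set("name", "sample project name") // read by c.PostForm("name")
	form.Set("winner", "")                  // read by c.PostForm("winner")
	form.Set("time", "20200101_20201231")   // "start_end" range, read by c.PostForm("time")
	// The URL below is a placeholder for wherever the gin router is mounted.
	resp, err := http.PostForm("http://localhost:8080/projectList", form)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}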

+ 5 - 0
conf/dev/es.toml

@@ -0,0 +1,5 @@
+[project]
+	addr = "http://192.168.3.128:9800"
+	index = "projectset"
+	itype = "projectset"
+	pool = 15

+ 5 - 0
conf/dev/mongo.toml

@@ -0,0 +1,5 @@
+[qyfw]
+	addr = "192.168.3.128:27080"
+	size = 15
+	db = "jyqyfw"
+	coll=  "user_data"

+ 45 - 0
db/es.go

@@ -0,0 +1,45 @@
+package db
+
+import (
+	"log"
+	"sfbase/core"
+	"sfbase/elastic"
+	"sfbase/global"
+)
+
+type EsConfig struct {
+	Project struct {
+		Addr  string `mapstructure:"addr"`
+		Index string `mapstructure:"index"`
+		Itype string `mapstructure:"itype"`
+		Pool  int    `mapstructure:"pool"`
+	} `mapstructure:"project"`
+}
+
+var (
+	Es        *elastic.Elastic
+	EsConfigs EsConfig
+)
+
+func InitEs() {
+	//DB settings are defined in the business layer; the config file is parsed into the struct by the basic viper helper in spirit
+
+	_, err := core.Viper(global.EnvPathPrefix+"/"+global.Env+"/es.toml", &EsConfigs)
+	if err != nil {
+		log.Println("EsConfig:", err)
+		return
+	}
+	log.Println("EsConfigs", EsConfigs)
+	Es = &elastic.Elastic{
+		S_esurl: EsConfigs.Project.Addr,
+		I_size:  EsConfigs.Project.Pool,
+	}
+	Es.InitElasticSize()
+}
+
+func GetEs() *elastic.Elastic {
+	if Es == nil {
+		log.Printf("ERROR:Es 实例为空!请重新实例化.")
+	}
+	return Es
+}
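
For reference, the mapstructure tags above are what tie conf/dev/es.toml to EsConfig. Below is a minimal standalone sketch, assuming core.Viper is a thin wrapper over github.com/spf13/viper (the real helper lives in sfbase/core and is not shown in this diff).

package main

import (
	"log"

	"github.com/spf13/viper"
)

type EsConfig struct {
	Project struct {
		Addr  string `mapstructure:"addr"`
		Index string `mapstructure:"index"`
		Itype string `mapstructure:"itype"`
		Pool  int    `mapstructure:"pool"`
	} `mapstructure:"project"`
}

func main() {
	v := viper.New()
	v.SetConfigFile("conf/dev/es.toml") // the [project] table shown earlier
	if err := v.ReadInConfig(); err != nil {
		log.Fatalln("read config:", err)
	}
	var cfg EsConfig
	if err := v.Unmarshal(&cfg); err != nil { // Unmarshal honors the mapstructure tags
		log.Fatalln("unmarshal config:", err)
	}
	log.Println(cfg.Project.Addr, cfg.Project.Index, cfg.Project.Itype, cfg.Project.Pool)
}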

+ 45 - 0
db/mongo.go

@@ -0,0 +1,45 @@
+package db
+
+import (
+	"log"
+	"sfbase/core"
+	"sfbase/global"
+	"sfbase/mongodb"
+)
+
+type MongoConfig struct {
+	Qyfw struct {
+		Addr string `mapstructure:"addr"`
+		Size int    `mapstructure:"size"`
+		Db   string `mapstructure:"db"`
+		Coll string `mapstructure:"coll"`
+	} `mapstructure:"qyfw"`
+}
+
+var (
+	Qyfw *mongodb.MongodbSim
+)
+
+func InitMongo() {
+	//DB settings are defined in the business layer; the config file is parsed into the struct by the basic viper helper in spirit
+	sfisDBConfig := MongoConfig{}
+	_, err := core.Viper(global.EnvPathPrefix+"/"+global.Env+"/mongo.toml", &sfisDBConfig)
+	if err != nil {
+		log.Println("sfisDBConfig:", err)
+		return
+	}
+
+	Qyfw = &mongodb.MongodbSim{
+		MongodbAddr: sfisDBConfig.Qyfw.Addr,
+		Size:        sfisDBConfig.Qyfw.Size,
+		DbName:      sfisDBConfig.Qyfw.Db,
+	}
+	Qyfw.InitPool()
+}
+
+func GetQyfw() *mongodb.MongodbSim {
+	if Qyfw == nil {
+		log.Printf("ERROR:Qyfw 实例为空!请重新实例化.")
+	}
+	return Qyfw
+}

+ 2 - 1
db/mysql.go

@@ -1,11 +1,12 @@
 package db
 
 import (
-	"gorm.io/gorm"
 	"log"
 	"sfbase/core"
 	"sfbase/global"
 	"sfbase/mysql"
+
+	"gorm.io/gorm"
 )
 
 type MysqlDBConfig struct {

+ 2 - 0
main.go

@@ -22,6 +22,8 @@ func main() {
 		log.Println("zap日志初始化异常,清检查后重试")
 	}
 	db.InitDB()
+	db.InitEs()
+	db.InitMongo()
 	if db.GetSFISDB() != nil {
 		//todo other caches service or init operation
 		users := make([]*model.User, 0)

+ 13 - 2
service/projectDetail.go

@@ -1,11 +1,22 @@
 package service
 
 import (
+	"fmt"
+	"sfbase/core"
 	"sfbase/global"
+	"sfis/db"
 )
 
-func ProjectDetailData(id string) *map[string]interface{} {
+func ProjectDetailData(id string) map[string]interface{} {
 	global.Logger.Info("id " + id)
-	data := &map[string]interface{}{}
+	data := map[string]interface{}{}
+	_query := `{"query":{"bool":{"must":[{"term":{"id":"%s"}}],"must_not":[],"should":[]}}}`
+	query := fmt.Sprintf(_query, id)
+	INDEX := core.GetStringConf("es.project.index")
+	TYPE := core.GetStringConf("es.project.itype")
+	infos := db.Es.Get(INDEX, TYPE, query)
+	if infos != nil && len(*infos) > 0 {
+		data = (*infos)[0]
+	}
 	return data
 }
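
For illustration, the format string above expands into a single term query on the id field. A self-contained sketch with an invented id value:

package main

import "fmt"

func main() {
	_query := `{"query":{"bool":{"must":[{"term":{"id":"%s"}}],"must_not":[],"should":[]}}}`
	fmt.Printf(_query+"\n", "abc123") // "abc123" is a made-up id
	// Output:
	// {"query":{"bool":{"must":[{"term":{"id":"abc123"}}],"must_not":[],"should":[]}}}
}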

+ 88 - 7
service/projects.go

@@ -1,19 +1,100 @@
 package service
 
 import (
+	"fmt"
+	"sfbase/core"
+	"sfbase/elastic"
 	"sfbase/global"
+	"sfis/db"
+	"strconv"
+	"strings"
 )
 
-func ProjectListData(projectName, winner, time string) *map[string]interface{} {
+var (
+	pjt_count    = 100
+	pjt_field    = `"_id","area","city","bidamount","firsttime","projectname","s_winner","buyer","buyerclass"`
+	pjt_sort     = `{"firsttime":-1}`
+	query        = `{"query": {"bool": {"must":[%s],"should":[%s],"minimum_should_match": 1}}}`
+	query_string = `{"constant_score": {"boost": 2,"query": {"match_phrase": {"projectname.pname": {"analyzer": "my_ngram","query": "%s","slop": 6}}}}},{"multi_match": {"query": "%s","fields": ["projectname.pname"],"analyzer": "ik","minimum_should_match": "100%%"}}`
+	query_winner = `{"term": {"s_winner": "%s"}}`
+	SR           = strings.Replace
+	HL           = `"highlight": {"pre_tags": [""],"post_tags": [""],"fields": {%s}}`
+	highlightStr = `%s: {"fragment_size": %d,"number_of_fragments": 1}`
+)
+
+func ProjectListData(projectName, winner, time string) (data *map[string]interface{}) {
 	global.Logger.Info("projectName " + projectName)
 	global.Logger.Info("winner " + winner)
 	global.Logger.Info("time " + time)
-	data := &map[string]interface{}{}
-	return data
+	time = strings.TrimSpace(time)
+	winner = strings.TrimSpace(winner)
+	pjt_name := strings.TrimSpace(projectName)
+	pjt_len := len([]rune(pjt_name))
+	data = &map[string]interface{}{}
+	qstr := ""
+	timestr := ""
+	if time != "" {
+		start := strings.Split(time, "_")[0]
+		end := strings.Split(time, "_")[1]
+		if start == "" && end != "" {
+			timestr = `{"range": {"firsttime": {"lte": ` + end + `}}}`
+		} else if start != "" && end == "" {
+			timestr = `{"range": {"firsttime": {"gte": ` + start + `}}}`
+		} else if start != "" && end != "" {
+			timestr = `{"range": {"firsttime": {"gte": ` + start + `,"lte": ` + end + `}}}`
+		}
+	}
+	if pjt_len >= 4 && winner == "" {
+		qstr = fmt.Sprintf(query, "", fmt.Sprintf(query_string, pjt_name, pjt_name))
+		if timestr != "" {
+			qstr = fmt.Sprintf(query, timestr, fmt.Sprintf(query_string, pjt_name, pjt_name))
+		}
+	} else if pjt_len >= 4 && winner != "" {
+		qstr = fmt.Sprintf(query, fmt.Sprintf(query_winner, winner), fmt.Sprintf(query_string, pjt_name, pjt_name))
+		if timestr != "" {
+			qstr = fmt.Sprintf(query, fmt.Sprintf(query_winner, winner)+","+timestr, fmt.Sprintf(query_string, pjt_name, pjt_name))
+		}
+	} else if winner != "" {
+		qstr = fmt.Sprintf(query, fmt.Sprintf(query_winner, winner), "")
+		if timestr != "" {
+			qstr = fmt.Sprintf(query, fmt.Sprintf(query_winner, winner)+","+timestr, "")
+		}
+	} else {
+		return
+	}
+	INDEX := core.GetStringConf("es.project.index")
+	TYPE := core.GetStringConf("es.project.itype")
+	Es := db.GetEs()
+	global.Logger.Info("INDEX " + INDEX)
+	global.Logger.Info("TYPE " + TYPE)
+	repl := GetAllByNgram(Es, INDEX, TYPE, qstr, "", pjt_sort, pjt_field, 0, pjt_count, 0, false)
+	if repl != nil && len(*repl) > 0 {
+		(*data)["data"] = repl
+	}
+	return
 }
 
-func WinnerProjectListData(winner string) *map[string]interface{} {
-	global.Logger.Info("winner " + winner)
-	data := &map[string]interface{}{}
-	return data
+func GetAllByNgram(Es *elastic.Elastic, index, itype, qstr, findfields, order, fields string, start, limit, count int, highlight bool) *[]map[string]interface{} {
+	if qstr != "" {
+		if highlight {
+			ws := []string{}
+			for _, w := range strings.Split(findfields, ",") {
+				ws = append(ws, fmt.Sprintf(highlightStr, w, count))
+			}
+			qstr = qstr[:len(qstr)-1] + `,` + fmt.Sprintf(HL, strings.Join(ws, ",")) + `}`
+		}
+		if len(fields) > 0 {
+			qstr = qstr[:len(qstr)-1] + `,"_source":[` + fields + "]}"
+		}
+		if len(order) > 0 {
+			qstr = qstr[:len(qstr)-1] + `,"sort":[` + SR(SR(SR(SR(order, ",", "},{", -1), " ", "", -1), ":-1", `:"desc"`, -1), ":1", `:"asc"`, -1) + `]}`
+		}
+		if start > -1 {
+			qstr = qstr[:len(qstr)-1] + `,"from":` + strconv.Itoa(start) + `,"size":` + strconv.Itoa(limit) + "}"
+		}
+		global.Logger.Info("GetAllByNgram:" + qstr)
+		return Es.Get(index, itype, qstr)
+	} else {
+		return nil
+	}
 }
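
To make the branching above concrete, here is a self-contained sketch of the query JSON that ProjectListData assembles when the project name has four or more characters, no winner is given, and a full time range is supplied; the input values are invented for illustration.

package main

import "fmt"

var (
	query        = `{"query": {"bool": {"must":[%s],"should":[%s],"minimum_should_match": 1}}}`
	query_string = `{"constant_score": {"boost": 2,"query": {"match_phrase": {"projectname.pname": {"analyzer": "my_ngram","query": "%s","slop": 6}}}}},{"multi_match": {"query": "%s","fields": ["projectname.pname"],"analyzer": "ik","minimum_should_match": "100%%"}}`
)

func main() {
	pjtName := "智慧城市项目" // sample name, four or more runes
	// time = "20200101_20201231" splits on "_" into start/end and becomes a range clause
	timestr := `{"range": {"firsttime": {"gte": 20200101,"lte": 20201231}}}`
	qstr := fmt.Sprintf(query, timestr, fmt.Sprintf(query_string, pjtName, pjtName))
	fmt.Println(qstr)
	// GetAllByNgram then appends "_source", "sort", "from" and "size" before calling Es.Get.
}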

+ 329 - 0
sword_base/elastic/elasticSim.go

@@ -0,0 +1,329 @@
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"log"
+	"runtime"
+	"strings"
+	"sync"
+	"time"
+
+	es "sfbase/olivere/elastic.v1"
+)
+
+type Elastic struct {
+	S_esurl      string
+	I_size       int
+	Addrs        []string
+	Pool         chan *es.Client
+	lastTime     int64
+	lastTimeLock sync.Mutex
+	ntimeout     int
+}
+
+func (e *Elastic) InitElasticSize() {
+	defer catch()
+	e.Pool = make(chan *es.Client, e.I_size)
+	for _, s := range strings.Split(e.S_esurl, ",") {
+		e.Addrs = append(e.Addrs, s)
+	}
+	for i := 0; i < e.I_size; i++ {
+		client, _ := es.NewClient(es.SetURL(e.Addrs...), es.SetMaxRetries(2), es.SetSniff(false))
+		e.Pool <- client
+	}
+}
+
+//Return the connection to the pool (close it if the pool is full)
+func (e *Elastic) DestoryEsConn(client *es.Client) {
+	select {
+	case e.Pool <- client:
+		break
+	case <-time.After(time.Second * 1):
+		if client != nil {
+			client.Stop()
+		}
+		client = nil
+	}
+}
+
+func (e *Elastic) GetEsConn() *es.Client {
+	select {
+	case c := <-e.Pool:
+		if c == nil || !c.IsRunning() {
+			log.Println("new esclient.", len(e.Pool))
+			client, err := es.NewClient(es.SetURL(e.Addrs...),
+				es.SetMaxRetries(2), es.SetSniff(false))
+			if err == nil && client.IsRunning() {
+				return client
+			}
+		}
+		return c
+	case <-time.After(time.Second * 4):
+		//timed out
+		e.ntimeout++
+		e.lastTimeLock.Lock()
+		defer e.lastTimeLock.Unlock()
+		//allow creating a new connection only after 12 seconds
+		c := time.Now().Unix() - e.lastTime
+		if c > 12 {
+			e.lastTime = time.Now().Unix()
+			log.Println("add client..", len(e.Pool))
+			c, _ := es.NewClient(es.SetURL(e.Addrs...), es.SetMaxRetries(2), es.SetSniff(false))
+			go func() {
+				for i := 0; i < 2; i++ {
+					client, _ := es.NewClient(es.SetURL(e.Addrs...), es.SetMaxRetries(2), es.SetSniff(false))
+					e.Pool <- client
+				}
+			}()
+			return c
+		}
+		return nil
+	}
+}
+
+func (e *Elastic) Get(index, itype, query string) *[]map[string]interface{} {
+	client := e.GetEsConn()
+	defer func() {
+		go e.DestoryEsConn(client)
+	}()
+	var res []map[string]interface{}
+	if client != nil {
+		defer func() {
+			if r := recover(); r != nil {
+				log.Println("[E]", r)
+				for skip := 1; ; skip++ {
+					_, file, line, ok := runtime.Caller(skip)
+					if !ok {
+						break
+					}
+					go log.Printf("%v,%v\n", file, line)
+				}
+			}
+		}()
+		searchResult, err := client.Search().Index(index).Type(itype).Source(query).Do()
+		if err != nil {
+			log.Println("从ES查询出错", err.Error())
+			return nil
+		}
+		if searchResult.Hits != nil {
+			resNum := len(searchResult.Hits.Hits)
+			if resNum < 5000 {
+				res = make([]map[string]interface{}, resNum)
+				for i, hit := range searchResult.Hits.Hits {
+					parseErr := json.Unmarshal(*hit.Source, &res[i])
+					if parseErr == nil && hit.Highlight != nil && res[i] != nil {
+						res[i]["highlight"] = map[string][]string(hit.Highlight)
+					}
+				}
+			} else {
+				log.Println("查询结果太多,查询到:", resNum, "条")
+			}
+		}
+	}
+	return &res
+}
+
+//Shut down the elastic pool
+func (e *Elastic) Close() {
+	defer catch()
+	for i := 0; i < e.I_size; i++ {
+		cli := <-e.Pool
+		cli.Stop()
+		cli = nil
+	}
+	e.Pool = nil
+	e = nil
+}
+
+//Get a connection
+//func (e *Elastic) GetEsConn() (c *es.Client) {
+//	defer catch()
+//	select {
+//	case c = <-e.Pool:
+//		if c == nil || !c.IsRunning() {
+//			client, err := es.NewClient(es.SetURL(addrs...),
+//				es.SetMaxRetries(2), es.SetSniff(false))
+//			if err == nil && client.IsRunning() {
+//				return client
+//			}
+//			return nil
+//		}
+//		return
+//	case <-time.After(time.Second * 7):
+//		//超时
+//		ntimeout++
+//		log.Println("timeout times:", ntimeout)
+//		return nil
+//	}
+//}
+
+func (e *Elastic) BulkSave(index, itype string, obj *[]map[string]interface{}, isDelBefore bool) {
+	defer catch()
+	client := e.GetEsConn()
+	defer e.DestoryEsConn(client)
+	if client != nil {
+		req := client.Bulk()
+		for _, v := range *obj {
+			if isDelBefore {
+				req = req.Add(es.NewBulkDeleteRequest().Index(index).Type(itype).Id(fmt.Sprintf("%v", v["_id"])))
+			}
+			req = req.Add(es.NewBulkIndexRequest().Index(index).Type(itype).Doc(v))
+		}
+		_, err := req.Do()
+		if err != nil {
+			log.Println("批量保存到ES出错", err.Error())
+		}
+	}
+}
+
+//Delete first, then re-insert
+func (e *Elastic) UpdateNewDoc(index, itype string, obj ...interface{}) bool {
+	defer catch()
+	client := e.GetEsConn()
+	defer e.DestoryEsConn(client)
+	b := false
+	if client != nil {
+		var err error
+		for _, v := range obj {
+			tempObj := objToMap(v)
+			id := fmt.Sprintf("%v", (*tempObj)["_id"])
+			client.Delete().Index(index).Type(itype).Id(id).Do()
+			_, err = client.Index().Index(index).Type(itype).BodyJson(tempObj).Do()
+			if err != nil {
+				log.Println("保存到ES出错", err.Error())
+			} else {
+				b = true
+			}
+		}
+
+	}
+	return b
+}
+
+//Delete an indexed document by id
+func (e *Elastic) DelById(index, itype, id string) bool {
+	defer catch()
+	client := e.GetEsConn()
+	defer e.DestoryEsConn(client)
+	b := false
+	if client != nil {
+		var err error
+		_, err = client.Delete().Index(index).Type(itype).Id(id).Do()
+		if err != nil {
+			log.Println("更新检索出错:", err.Error())
+		} else {
+			b = true
+		}
+	}
+	return b
+}
+func (e *Elastic) GetNoLimit(index, itype, query string) *[]map[string]interface{} {
+	//log.Println("query  -- ", query)
+	client := e.GetEsConn()
+	defer e.DestoryEsConn(client)
+	var res []map[string]interface{}
+	if client != nil {
+		defer func() {
+			if r := recover(); r != nil {
+				log.Println("[E]", r)
+				for skip := 1; ; skip++ {
+					_, file, line, ok := runtime.Caller(skip)
+					if !ok {
+						break
+					}
+					go log.Printf("%v,%v\n", file, line)
+				}
+			}
+		}()
+		searchResult, err := client.Search().Index(index).Type(itype).Source(query).Do()
+		if err != nil {
+			log.Println("从ES查询出错", err.Error())
+			return nil
+		}
+
+		if searchResult.Hits != nil {
+			resNum := len(searchResult.Hits.Hits)
+			res = make([]map[string]interface{}, resNum)
+			for i, hit := range searchResult.Hits.Hits {
+				json.Unmarshal(*hit.Source, &res[i])
+			}
+		}
+	}
+	return &res
+}
+func (e *Elastic) GetByIdField(index, itype, id, fields string) *map[string]interface{} {
+	client := e.GetEsConn()
+	defer e.DestoryEsConn(client)
+	if client != nil {
+		defer func() {
+			if r := recover(); r != nil {
+				log.Println("[E]", r)
+				for skip := 1; ; skip++ {
+					_, file, line, ok := runtime.Caller(skip)
+					if !ok {
+						break
+					}
+					go log.Printf("%v,%v\n", file, line)
+				}
+			}
+		}()
+		query := `{"query":{"term":{"_id":"` + id + `"}}`
+		if len(fields) > 0 {
+			query = query + `,"_source":[` + fields + `]`
+		}
+		query = query + "}"
+		searchResult, err := client.Search().Index(index).Type(itype).Source(query).Do()
+		if err != nil {
+			log.Println("从ES查询出错", err.Error())
+			return nil
+		}
+		var res map[string]interface{}
+		if searchResult.Hits != nil {
+			resNum := len(searchResult.Hits.Hits)
+			if resNum == 1 {
+				res = make(map[string]interface{})
+				for _, hit := range searchResult.Hits.Hits {
+					json.Unmarshal(*hit.Source, &res)
+				}
+				return &res
+			}
+		}
+	}
+	return nil
+}
+
+func (e *Elastic) Count(index, itype string, query interface{}) int64 {
+	client := e.GetEsConn()
+	defer e.DestoryEsConn(client)
+	if client != nil {
+		defer func() {
+			if r := recover(); r != nil {
+				log.Println("[E]", r)
+				for skip := 1; ; skip++ {
+					_, file, line, ok := runtime.Caller(skip)
+					if !ok {
+						break
+					}
+					go log.Printf("%v,%v\n", file, line)
+				}
+			}
+		}()
+		var qq es.Query
+		if qs, ok := query.(string); ok {
+			temp := es.BoolQuery{
+				QueryStrings: qs,
+			}
+			qq = temp
+		} else if qi, ok2 := query.(es.Query); ok2 {
+			qq = qi
+		}
+		n, err := client.Count(index).Type(itype).Query(qq).Do()
+		if err != nil {
+			log.Println("统计出错", err.Error())
+		}
+
+		return n
+	}
+	return 0
+}
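
A minimal usage sketch for the pooled client above, complementing the BulkSave test that follows; the address, index, and type values here are placeholders, not settings from this repository.

package main

import (
	"fmt"

	"sfbase/elastic"
)

func main() {
	es := &elastic.Elastic{
		S_esurl: "http://127.0.0.1:9200", // placeholder ES address
		I_size:  5,                       // pool size
	}
	es.InitElasticSize()
	defer es.Close()

	// Match-all query against a hypothetical index/type pair.
	res := es.Get("someindex", "sometype", `{"query":{"match_all":{}},"size":3}`)
	if res != nil {
		for _, doc := range *res {
			fmt.Println(doc)
		}
	}
}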

+ 31 - 0
sword_base/elastic/elasticSim_test.go

@@ -0,0 +1,31 @@
+package elastic
+
+import (
+	"testing"
+)
+
+func _GetES() *Elastic {
+	es := &Elastic{
+		S_esurl: "http://192.168.3.18:9800",
+		I_size:  5,
+	}
+	es.InitElasticSize()
+	return es
+}
+
+func Test_bulksave(t *testing.T) {
+	es := _GetES()
+	defer es.Close()
+	es.BulkSave("qyfw", "qyfw", &[]map[string]interface{}{
+		map[string]interface{}{
+			"EntName": "Test1",
+			"RegNo":   "1234568",
+			"_id":     5,
+		},
+		map[string]interface{}{
+			"EntName": "Test2",
+			"RegNo":   "11117",
+			"_id":     9,
+		},
+	}, true)
+}

+ 1374 - 0
sword_base/elastic/elasticutil.go

@@ -0,0 +1,1374 @@
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"log"
+	"net/url"
+	"reflect"
+	"runtime"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	es "sfbase/olivere/elastic.v1"
+)
+
+//Addresses of the search (ES) service
+var addrs []string
+var LocCity = map[string]string{}
+var SIZE = 30
+
+const (
+	QStr = `{"query":{"bool":{"must":[$and],"must_not":[],
+	"should":[$or],"minimum_should_match" : 1}}}`
+)
+
+var pool chan *es.Client
+var ntimeout int
+
+var syncPool sync.Pool
+
+//Initialize full-text search
+func InitElastic(addr string) {
+	InitElasticSize(addr, SIZE)
+}
+
+//Custom HttpClient
+/**
+var httpclient = &http.Client{Transport: &http.Transport{
+	Dial: func(netw, addr string) (net.Conn, error) {
+		deadline := time.Now().Add(5000 * time.Millisecond)
+		c, err := net.DialTimeout(netw, addr, 10000*time.Millisecond)
+		if err != nil {
+			return nil, err
+		}
+		tcp_conn := c.(*net.TCPConn)
+		tcp_conn.SetKeepAlive(false)
+		tcp_conn.SetDeadline(deadline)
+		return tcp_conn, nil
+	},
+	DisableKeepAlives: true, //do not keep connections alive, so they can be released
+}}
+**/
+//var op = es.SetHttpClient(httpclient)
+var poolsize = int32(20)
+
+//pool with n-times capacity
+func InitElasticSize(addr string, size int) {
+	poolsize = int32(3 * size)
+	pool = make(chan *es.Client, poolsize)
+	for _, s := range strings.Split(addr, ",") {
+		addrs = append(addrs, s)
+	}
+	for i := 0; i < size; i++ {
+		client, _ := es.NewClient(es.SetURL(addrs...), es.SetMaxRetries(2), es.SetSniff(false))
+		pool <- client
+	}
+}
+
+//Return the connection to the pool (close it if the pool is full)
+func DestoryEsConn(client *es.Client) {
+	select {
+	case pool <- client:
+		break
+	case <-time.After(time.Second * 1):
+		if client != nil {
+			client.Stop()
+		}
+		client = nil
+	}
+}
+
+var (
+	lastTime     = int64(0)
+	lastTimeLock = &sync.Mutex{}
+)
+
+//Get a connection
+
+func GetEsConn() *es.Client {
+	select {
+	case c := <-pool:
+		if c == nil || !c.IsRunning() {
+			log.Println("new esclient.", len(pool))
+			client, err := es.NewClient(es.SetURL(addrs...),
+				es.SetMaxRetries(2), es.SetSniff(false))
+			if err == nil && client.IsRunning() {
+				return client
+			}
+		}
+		return c
+	case <-time.After(time.Second * 4):
+		//timed out
+		ntimeout++
+		lastTimeLock.Lock()
+		defer lastTimeLock.Unlock()
+		//allow creating a new connection only after 12 seconds
+		c := time.Now().Unix() - lastTime
+		if c > 12 {
+			lastTime = time.Now().Unix()
+			log.Println("add client..", len(pool))
+			c, _ := es.NewClient(es.SetURL(addrs...), es.SetMaxRetries(2), es.SetSniff(false))
+			go func() {
+				for i := 0; i < 2; i++ {
+					client, _ := es.NewClient(es.SetURL(addrs...), es.SetMaxRetries(2), es.SetSniff(false))
+					pool <- client
+				}
+			}()
+			return c
+		}
+		return nil
+	}
+}
+
+//Save an object
+func Save(index, itype string, obj interface{}) bool {
+	client := GetEsConn()
+	defer DestoryEsConn(client)
+	defer func() {
+		if r := recover(); r != nil {
+			log.Println("[E]", r)
+			for skip := 1; ; skip++ {
+				_, file, line, ok := runtime.Caller(skip)
+				if !ok {
+					break
+				}
+				go log.Printf("%v,%v\n", file, line)
+			}
+		}
+	}()
+	_, err := client.Index().Index(index).Type(itype).BodyJson(objToMap(obj)).Do()
+	if err != nil {
+		log.Println("保存到ES出错", err.Error(), obj)
+		return false
+	} else {
+		return true
+	}
+
+}
+
+//Generic query
+//{"query": {"bool":{"must":[{"query_string":{"default_field":"name","query":"*"}}]}}}
+//{"query":{"bool":{"must":{"match":{"content":{"query":"fulltextsearch","operator":"and"}}},"should":[{"match":{"content":{"query":"Elasticsearch","boost":3}}},{"match":{"content":{"query":"Lucene","boost":2}}}]}}}
+//prefix
+//{"query":{"match":{"title":{"query":"brownfox","operator":"and"}}}} //operator defaults to or
+//{"query":{"multi_match":{"query":"PolandStreetW1V","type":"most_fields","fields":["*_street","city^2","country","postcode"]}}}
+//{"query":{"wildcard":{"postcode":"W?F*HW"}}}
+//{"query":{"regexp":{"postcode":"W[0-9].+"}}}
+//{"query":{"filtered":{"filter":{"range":{"price":{"gte":10000}}}}},"aggs":{"single_avg_price":{"avg":{"field":"price"}}}}
+//{"query":{"match":{"make":"ford"}},"aggs":{"colors":{"terms":{"field":"color"}}}}//how many colors does ford come in
+//Filters do not compute relevance scores, so they are computationally faster
+//{"query":{"filtered":{"query":{"match_all":{}},"filter":{"range":{"balance":{"gte":20000,"lte":30000}}}}}}
+//{"query":{"match_all":{}},"from":10,"size":10,"_source":["account_number","balance"],"sort":{"balance":{"order":"desc"}}}
+//{"query":{"match_phrase":{"address":"milllane"}}} unlike match, this matches the whole phrase, equivalent to must[]
+func Get(index, itype, query string) *[]map[string]interface{} {
+	//log.Println("query  -- ", query)
+	client := GetEsConn()
+	defer func() {
+		go DestoryEsConn(client)
+	}()
+	var res []map[string]interface{}
+	if client != nil {
+		defer func() {
+			if r := recover(); r != nil {
+				log.Println("[E]", r)
+				for skip := 1; ; skip++ {
+					_, file, line, ok := runtime.Caller(skip)
+					if !ok {
+						break
+					}
+					go log.Printf("%v,%v\n", file, line)
+				}
+			}
+		}()
+		searchResult, err := client.Search().Index(index).Type(itype).Source(query).Do()
+		if err != nil {
+			log.Println("从ES查询出错", err.Error())
+			return nil
+		}
+
+		if searchResult.Hits != nil {
+			resNum := len(searchResult.Hits.Hits)
+			if resNum < 5000 {
+				res = make([]map[string]interface{}, resNum)
+				for i, hit := range searchResult.Hits.Hits {
+					//d := json.NewDecoder(bytes.NewBuffer(*hit.Source))
+					//d.UseNumber()
+					//d.Decode(&res[i])
+					parseErr := json.Unmarshal(*hit.Source, &res[i])
+					if parseErr == nil && hit.Highlight != nil && res[i] != nil {
+						res[i]["highlight"] = map[string][]string(hit.Highlight)
+					}
+				}
+			} else {
+				log.Println("查询结果太多,查询到:", resNum, "条")
+			}
+
+		}
+	}
+	return &res
+}
+func GetOA(index, itype, query string) (*[]map[string]interface{}, int) {
+	//log.Println("query  -- ", query)
+	client := GetEsConn()
+	defer func() {
+		go DestoryEsConn(client)
+	}()
+	var res []map[string]interface{}
+	var resNum int
+	if client != nil {
+		defer func() {
+			if r := recover(); r != nil {
+				log.Println("[E]", r)
+				for skip := 1; ; skip++ {
+					_, file, line, ok := runtime.Caller(skip)
+					if !ok {
+						break
+					}
+					go log.Printf("%v,%v\n", file, line)
+				}
+			}
+		}()
+		searchResult, err := client.Search().Index(index).Type(itype).Source(query).Do()
+		if err != nil {
+			log.Println("从ES查询出错", err.Error())
+			return nil, 0
+		}
+
+		if searchResult.Hits != nil {
+			resNum = len(searchResult.Hits.Hits)
+			if resNum < 5000 {
+				res = make([]map[string]interface{}, resNum)
+				for i, hit := range searchResult.Hits.Hits {
+					//d := json.NewDecoder(bytes.NewBuffer(*hit.Source))
+					//d.UseNumber()
+					//d.Decode(&res[i])
+					parseErr := json.Unmarshal(*hit.Source, &res[i])
+					if parseErr == nil && hit.Highlight != nil && res[i] != nil {
+						res[i]["highlight"] = map[string][]string(hit.Highlight)
+					}
+				}
+			} else {
+				log.Println("查询结果太多,查询到:", resNum, "条")
+			}
+
+		}
+	}
+	return &res, resNum
+}
+
+func GetNoLimit(index, itype, query string) *[]map[string]interface{} {
+	//log.Println("query  -- ", query)
+	client := GetEsConn()
+	defer DestoryEsConn(client)
+	var res []map[string]interface{}
+	if client != nil {
+		defer func() {
+			if r := recover(); r != nil {
+				log.Println("[E]", r)
+				for skip := 1; ; skip++ {
+					_, file, line, ok := runtime.Caller(skip)
+					if !ok {
+						break
+					}
+					go log.Printf("%v,%v\n", file, line)
+				}
+			}
+		}()
+		searchResult, err := client.Search().Index(index).Type(itype).Source(query).Do()
+		if err != nil {
+			log.Println("从ES查询出错", err.Error())
+			return nil
+		}
+
+		if searchResult.Hits != nil {
+			resNum := len(searchResult.Hits.Hits)
+			res = make([]map[string]interface{}, resNum)
+			for i, hit := range searchResult.Hits.Hits {
+				json.Unmarshal(*hit.Source, &res[i])
+			}
+		}
+	}
+	return &res
+}
+
+//Paged query
+//{"name":"张三","$and":[{"age":{"$gt":10}},{"age":{"$lte":20}}]}
+//fields is passed directly as `"_id","title"`
+func GetPage(index, itype, query, order, field string, start, limit int) *[]map[string]interface{} {
+	return Get(index, itype, MakeQuery(query, order, field, start, limit))
+}
+
+//openapi
+func GetOAPage(index, itype, query, order, field string, start, limit int) (*[]map[string]interface{}, int) {
+	return GetOA(index, itype, MakeQuery(query, order, field, start, limit))
+}
+
+var SR = strings.Replace
+
+func MakeQuery(query, order, fileds string, start, limit int) string {
+	res := AnalyQuery(query, "", QStr)
+	if len(res) > 10 {
+		res = SR(SR(SR(SR(res, ",$and", "", -1), "$and", "", -1), ",$or", "", -1), "$or", "", -1)
+		if len(fileds) > 0 {
+			//"_source":["account_number","balance"]
+			res = res[:len(res)-1] + `,"_source":[` + fileds + "]}"
+		}
+		//{"name":-1,"age":1}
+		if len(order) > 0 {
+			res = res[:len(res)-1] + `,"sort":[` + SR(SR(SR(SR(order, ",", "},{", -1), " ", "", -1), ":-1", `:"desc"`, -1), ":1", `:"asc"`, -1) + `]}`
+		}
+		if start > -1 {
+			res = res[:len(res)-1] + `,"from":` + strconv.Itoa(start) + `,"size":` + strconv.Itoa(limit) + "}"
+		}
+		return res
+	}
+	return ""
+}
+
+//{"name":"aaa"}
+func AnalyQuery(query interface{}, parent string, result string) string {
+	m := make(map[string]interface{})
+	if q1, ok := query.(string); ok {
+		json.Unmarshal([]byte(q1), &m)
+	} else if q2, ok2 := query.(map[string]interface{}); ok2 {
+		m = q2
+	}
+	if len(parent) == 0 {
+		for k, v := range m {
+			if k == "$and" || k == "$or" {
+				temps := ""
+				if map1, ok := v.([]interface{}); ok {
+					for i := 0; i < len(map1); i++ {
+						temps += "," + AnalyQuery(map1[i], k, "")
+					}
+				}
+				if len(temps) > 0 {
+					temps = temps[1:]
+				}
+				result = SR(result, k, temps+","+k, 1)
+			} else {
+				switch reflect.TypeOf(v).String() {
+				case "string":
+					if strings.Index(k, "TERM_") == 0 {
+						result = SR(result, "$and", `{"term":{"`+SR(k, "TERM_", "", 1)+`":"`+fmt.Sprintf("%v", v)+`"}},$and`, 1)
+					} else {
+						result = SR(result, "$and", `{"query_string":{"default_field":"`+k+`","query":"`+fmt.Sprintf("%v", v)+`"}},$and`, 1)
+					}
+				case "int", "int8", "int32", "int64", "float32", "float64":
+					if strings.Index(k, "TERM_") == 0 {
+						result = SR(result, "$and", `{"term":{"`+SR(k, "TERM_", "", 1)+`":`+fmt.Sprintf("%v", v)+`}},$and`, 1)
+					} else {
+						result = SR(result, "$and", `{"query_string":{"default_field":"`+k+`","query":`+fmt.Sprintf("%v", v)+`}},$and`, 1)
+					}
+				default:
+					result = SR(result, "$and", AnalyQuery(v, k, "")+",$and", 1)
+				}
+			}
+		}
+		return result
+	} else {
+		for k, v := range m {
+			if k == "$in" {
+				s := ""
+				if map1, ok := v.([]interface{}); ok {
+					for i := 0; i < len(map1); i++ {
+						s += "," + `"` + fmt.Sprintf("%v", map1[i]) + `"`
+					}
+				}
+				if len(s) > 0 {
+					s = s[1:]
+				}
+				return `{"terms":{"` + parent + `":[` + s + `]}}`
+			} else if strings.Contains(k, "$lt") || strings.Contains(k, "$gt") {
+				return `{"range":{"` + parent + `":{"` + SR(k, "$", "", 1) + `":` + fmt.Sprintf("%v", v) + `}}}`
+			} else {
+				switch reflect.TypeOf(v).String() {
+				case "string":
+					if strings.Index(k, "TERM_") == 0 {
+						return `{"term":{"` + SR(k, "TERM_", "", 1) + `":"` + fmt.Sprintf("%v", v) + `"}}`
+					} else {
+						return `{"query_string":{"default_field":"` + k + `","query":"` + fmt.Sprintf("%v", v) + `"}}`
+					}
+				case "int", "int8", "int32", "int64", "float32", "float64":
+					if strings.Index(k, "TERM_") == 0 {
+						return `{"term":{"` + SR(k, "TERM_", "", 1) + `":` + fmt.Sprintf("%v", v) + `}}`
+					} else {
+						return `{"query_string":{"default_field":"` + k + `","query":` + fmt.Sprintf("%v", v) + `}}`
+					}
+				default:
+					return AnalyQuery(v, k, result)
+				}
+			}
+		}
+	}
+	return result
+}
+func GetByIdField(index, itype, id, fields string) *map[string]interface{} {
+	client := GetEsConn()
+	defer DestoryEsConn(client)
+	if client != nil {
+		defer func() {
+			if r := recover(); r != nil {
+				log.Println("[E]", r)
+				for skip := 1; ; skip++ {
+					_, file, line, ok := runtime.Caller(skip)
+					if !ok {
+						break
+					}
+					go log.Printf("%v,%v\n", file, line)
+				}
+			}
+		}()
+		query := `{"query":{"term":{"_id":"` + id + `"}}`
+		if len(fields) > 0 {
+			query = query + `,"_source":[` + fields + `]`
+		}
+		query = query + "}"
+		searchResult, err := client.Search().Index(index).Type(itype).Source(query).Do()
+		if err != nil {
+			log.Println("从ES查询出错", err.Error())
+			return nil
+		}
+		var res map[string]interface{}
+		if searchResult.Hits != nil {
+			resNum := len(searchResult.Hits.Hits)
+			if resNum == 1 {
+				res = make(map[string]interface{})
+				for _, hit := range searchResult.Hits.Hits {
+					json.Unmarshal(*hit.Source, &res)
+				}
+				return &res
+			}
+		}
+	}
+	return nil
+}
+
+//根据id来查询文档
+func GetById(index, itype string, ids ...string) *[]map[string]interface{} {
+	client := GetEsConn()
+	defer DestoryEsConn(client)
+	var res []map[string]interface{}
+	if client != nil {
+		defer func() {
+			if r := recover(); r != nil {
+				log.Println("[E]", r)
+				for skip := 1; ; skip++ {
+					_, file, line, ok := runtime.Caller(skip)
+					if !ok {
+						break
+					}
+					go log.Printf("%v,%v\n", file, line)
+				}
+			}
+		}()
+		query := es.NewIdsQuery().Ids(ids...)
+		searchResult, err := client.Search().Index(index).Type(itype).Query(&query).Do()
+		if err != nil {
+			log.Println("从ES查询出错", err.Error())
+			return nil
+		}
+
+		if searchResult.Hits != nil {
+			resNum := len(searchResult.Hits.Hits)
+			if resNum < 5000 {
+				res = make([]map[string]interface{}, resNum)
+				for i, hit := range searchResult.Hits.Hits {
+					json.Unmarshal(*hit.Source, &res[i])
+				}
+			} else {
+				log.Println("查询结果太多,查询到:", resNum, "条")
+			}
+
+		}
+	}
+	return &res
+}
+
+//Delete documents from an index by query
+func Del(index, itype string, query interface{}) bool {
+	client := GetEsConn()
+	defer DestoryEsConn(client)
+	b := false
+	if client != nil {
+		defer func() {
+			if r := recover(); r != nil {
+				log.Println("[E]", r)
+				for skip := 1; ; skip++ {
+					_, file, line, ok := runtime.Caller(skip)
+					if !ok {
+						break
+					}
+					go log.Printf("%v,%v\n", file, line)
+				}
+			}
+		}()
+		var err error
+		if qs, ok := query.(string); ok {
+			temp := es.BoolQuery{
+				QueryStrings: qs,
+			}
+			_, err = client.DeleteByQuery().Index(index).Type(itype).Query(temp).Do()
+		} else if qi, ok2 := query.(es.Query); ok2 {
+			_, err = client.DeleteByQuery().Index(index).Type(itype).Query(qi).Do()
+		}
+		if err != nil {
+			log.Println("删除索引出错:", err.Error())
+		} else {
+			b = true
+		}
+	}
+	return b
+}
+
+//Update a document with an update script
+func Update(index, itype, id string, updateStr string) bool {
+	client := GetEsConn()
+	defer DestoryEsConn(client)
+	b := false
+	if client != nil {
+		defer func() {
+			if r := recover(); r != nil {
+				log.Println("[E]", r)
+				for skip := 1; ; skip++ {
+					_, file, line, ok := runtime.Caller(skip)
+					if !ok {
+						break
+					}
+					go log.Printf("%v,%v\n", file, line)
+				}
+			}
+		}()
+		var err error
+		_, err = client.Update().Index(index).Type(itype).Id(id).Script(updateStr).ScriptLang("groovy").Do()
+		if err != nil {
+			log.Println("更新检索出错:", err.Error())
+		} else {
+			b = true
+		}
+	}
+	return b
+}
+
+func BulkUpdate(index, itype string, ids []string, updateStr string) {
+	client := GetEsConn()
+	defer DestoryEsConn(client)
+	if client != nil {
+		defer func() {
+			if r := recover(); r != nil {
+				log.Println("[E]", r)
+				for skip := 1; ; skip++ {
+					_, file, line, ok := runtime.Caller(skip)
+					if !ok {
+						break
+					}
+					go log.Printf("%v,%v\n", file, line)
+				}
+			}
+		}()
+		for _, id := range ids {
+			_, err := client.Update().Index(index).Type(itype).Id(id).Script(updateStr).ScriptLang("groovy").Do()
+			if err != nil {
+				log.Println("更新检索出错:", err.Error())
+			}
+		}
+	}
+}
+
+//Delete an indexed document by id
+func DelById(index, itype, id string) bool {
+	client := GetEsConn()
+	defer DestoryEsConn(client)
+	b := false
+	if client != nil {
+		defer func() {
+			if r := recover(); r != nil {
+				log.Println("[E]", r)
+				for skip := 1; ; skip++ {
+					_, file, line, ok := runtime.Caller(skip)
+					if !ok {
+						break
+					}
+					go log.Printf("%v,%v\n", file, line)
+				}
+			}
+		}()
+		var err error
+		_, err = client.Delete().Index(index).Type(itype).Id(id).Do()
+		if err != nil {
+			log.Println("更新检索出错:", err.Error())
+		} else {
+			b = true
+		}
+	}
+	return b
+}
+
+//Delete first, then re-insert
+func UpdateNewDoc(index, itype string, obj ...interface{}) bool {
+	client := GetEsConn()
+	defer DestoryEsConn(client)
+	b := false
+	if client != nil {
+		defer func() {
+			if r := recover(); r != nil {
+				log.Println("[E]", r)
+				for skip := 1; ; skip++ {
+					_, file, line, ok := runtime.Caller(skip)
+					if !ok {
+						break
+					}
+					go log.Printf("%v,%v\n", file, line)
+				}
+			}
+		}()
+		var err error
+		for _, v := range obj {
+			tempObj := objToMap(v)
+			id := fmt.Sprintf("%v", (*tempObj)["_id"])
+			client.Delete().Index(index).Type(itype).Id(id).Do()
+			_, err = client.Index().Index(index).Type(itype).BodyJson(tempObj).Do()
+			if err != nil {
+				log.Println("保存到ES出错", err.Error())
+			} else {
+				b = true
+			}
+		}
+
+	}
+	return b
+}
+
+//Convert a district code into a location name
+func getLoc(code string, res *map[string]string) (loc string) {
+	switch len(code) {
+	case 6:
+		loc = (*res)[code[:2]] + " " + (*res)[code[:4]] + " " + (*res)[code]
+		break
+	case 4:
+		loc = (*res)[code[:2]] + " " + (*res)[code]
+		break
+	case 2:
+		loc = (*res)[code]
+		break
+	}
+	return
+}
+
+func ConverData(ent *map[string]interface{}) map[string]interface{} {
+	tmp := *ent
+	id64, _ := tmp["ID"].(int64)
+	ids := fmt.Sprintf("%d", id64)
+	tmp2 := make(map[string]interface{})
+	tmp2["ID"] = ids
+	tmp2["_id"] = tmp["_id"]
+	tmp2["Area"] = tmp["Area"]
+	tmp2["LeRep"] = tmp["LeRep"]
+	tmp2["RegNo"] = tmp["RegNo"]
+	tmp2["EntType"] = tmp["EntType"]
+	tmp2["EntName"] = tmp["EntName"]
+	tmp2["EntTypeName"] = tmp["EntTypeName"]
+	tmp2["Dom"] = tmp["Dom"]
+	tmp2["EstDate"] = tmp["EstDate"]
+	tmp2["OpStateName"] = tmp["OpStateName"]
+	tmp2["OpScope"] = tmp["OpScope"]
+	tmp2["OpState"] = tmp["OpState"]
+	tmp2["s_submitid"] = tmp["s_submitid"]
+	tmp2["l_submittime"] = tmp["l_submittime"]
+	tmp2["s_submitname"] = tmp["s_submitname"]
+	tmp2["RegCapCurName"] = tmp["RegCapCurName"]
+	//add a flag for sorting by business status
+	if tmp2["OpState"] == "06" {
+		tmp2["OpSint"] = true
+	} else {
+		tmp2["OpSint"] = false
+	}
+	tmp2["OpLocDistrict"] = tmp["OpLocDistrict"]
+	//add code-to-name conversion
+	tmpLoc, _ := tmp["OpLocDistrict"].(string)
+	tmp2["OpLocDistrictName"] = getLoc(tmpLoc, &LocCity)
+
+	tmp2["RecCap"] = tmp["RecCap"]
+	tmp2["RegCap"] = tmp["RegCap"]
+	tmp2["IndustryPhy"] = tmp["IndustryPhy"]
+	tmp2["IndustryPhyName"] = tmp["IndustryPhyName"]
+	tmp2["RegOrg"] = tmp["RegOrg"]
+	tmp2["RegOrgName"] = tmp["RegOrgName"]
+	tmp2["Tel"] = tmp["Tel"]
+	tmp2["CompForm"] = tmp["CompForm"]
+	tmp2["CompFormName"] = tmp["CompFormName"]
+	//add the abnormal-operations-list flag; Ycml may be a bool or a string
+	Ycmlb, _ := tmp["Ycml"].(bool)
+	Ycmls, _ := tmp["Ycml"].(string)
+	if Ycmlb || Ycmls == "1" {
+		tmp2["Ycml"] = true
+	} else {
+		tmp2["Ycml"] = false
+	}
+	//add annual-report contact information
+	if tmp["Nb_email"] != nil {
+		tmp2["Nb_email"] = tmp["Nb_email"]
+	}
+	if tmp["Nb_tel"] != nil {
+		tmp2["Nb_tel"] = tmp["Nb_tel"]
+	}
+	if tmp["Nb_addr"] != nil {
+		tmp2["Nb_addr"] = tmp["Nb_addr"]
+	}
+
+	s_synopsis := tmp["s_synopsis"]
+	if s_synopsis == nil {
+		s_synopsis = ""
+	}
+	tmp2["s_synopsis"] = s_synopsis //company profile
+
+	//shareholders
+	stock := getStock(tmp["investor"])
+	tmp2["stock"] = stock
+
+	tmp2["LegCerNO"] = tmp["LegCerNO"]
+	if tmp["s_microwebsite"] != nil {
+		tmp2["s_microwebsite"] = tmp["s_microwebsite"]
+	}
+
+	tmp2["SourceType"] = tmp["SourceType"] //data source
+	s_servicenames := tmp["s_servicenames"]
+	if s_servicenames == nil {
+		s_servicenames = ""
+	}
+	tmp2["s_servicenames"] = s_servicenames //service names
+	s_action := tmp["s_action"]
+	if s_action == nil {
+		s_action = "N"
+	}
+	tmp2["s_action"] = s_action
+	tmp2["s_persion"] = tmp["s_persion"]
+	tmp2["s_mobile"] = tmp["s_mobile"]
+	tmp2["s_enturl"] = tmp["s_enturl"]
+	tmp2["s_weixin"] = tmp["s_weixin"]
+	tmp2["s_avatar"] = tmp["s_avatar"]
+	return tmp2
+}
+
+func getStock(obj interface{}) string {
+	stock := ""
+	if ns, ok := obj.([]interface{}); ok {
+		stock = " "
+		for _, ns1 := range ns {
+			if nn, ok1 := ns1.(map[string]interface{}); ok1 {
+				tmp := fmt.Sprintf("%s", nn["Inv"])
+				if strings.Index(stock, tmp) < 0 {
+					stock += tmp + " "
+				}
+			}
+		}
+	}
+	return stock
+}
+
+func BulkSave(index, itype string, obj *[]map[string]interface{}, isDelBefore bool) {
+	client := GetEsConn()
+	defer DestoryEsConn(client)
+	if client != nil {
+		defer func() {
+			if r := recover(); r != nil {
+				log.Println("[E]", r)
+				for skip := 1; ; skip++ {
+					_, file, line, ok := runtime.Caller(skip)
+					if !ok {
+						break
+					}
+					go log.Printf("%v,%v\n", file, line)
+				}
+			}
+		}()
+		req := client.Bulk()
+		for _, v := range *obj {
+			if isDelBefore {
+				req = req.Add(es.NewBulkDeleteRequest().Index(index).Type(itype).Id(fmt.Sprintf("%v", v["_id"])))
+			}
+			req = req.Add(es.NewBulkIndexRequest().Index(index).Type(itype).Doc(v))
+		}
+		_, err := req.Do()
+		if err != nil {
+			log.Println("批量保存到ES出错", err.Error())
+		}
+	}
+}
+
+func Count(index, itype string, query interface{}) int64 {
+	client := GetEsConn()
+	defer DestoryEsConn(client)
+	if client != nil {
+		defer func() {
+			if r := recover(); r != nil {
+				log.Println("[E]", r)
+				for skip := 1; ; skip++ {
+					_, file, line, ok := runtime.Caller(skip)
+					if !ok {
+						break
+					}
+					go log.Printf("%v,%v\n", file, line)
+				}
+			}
+		}()
+		var qq es.Query
+		if qs, ok := query.(string); ok {
+			temp := es.BoolQuery{
+				QueryStrings: qs,
+			}
+			qq = temp
+		} else if qi, ok2 := query.(es.Query); ok2 {
+			qq = qi
+		}
+		n, err := client.Count(index).Type(itype).Query(qq).Do()
+		if err != nil {
+			log.Println("统计出错", err.Error())
+		}
+
+		return n
+	}
+	return 0
+}
+
+//ngram exact-phrase query
+/*
+{
+  "query": {
+    "bool": {
+      "should": [
+        {
+	"bool":{
+	  "must":[
+	  {     "multi_match": {
+            "query": "智能",
+            "type": "phrase",
+            "fields": [
+              "title"
+            ],
+           "analyzer": "my_ngram"
+          }
+        },{
+          "multi_match": {
+            "query": "机器",
+            "type": "phrase",
+            "fields": [
+              "title"
+            ],
+           "analyzer": "my_ngram"
+          }
+        },{
+          "multi_match": {
+            "query": "2016",
+            "type": "phrase",
+            "fields": [
+              "title"
+            ],
+           "analyzer": "my_ngram"
+          }
+	  }
+	  ]
+	}
+        },
+
+{
+	"bool":{
+	  "must":[
+	  {          "multi_match": {
+            "query": "河南",
+            "type": "phrase",
+            "fields": [
+              "title"
+            ],
+           "analyzer": "my_ngram"
+          }
+        },{
+          "multi_match": {
+            "query": "工商",
+            "type": "phrase",
+            "fields": [
+              "title"
+            ],
+           "analyzer": "my_ngram"
+          }
+        },{
+          "multi_match": {
+            "query": "2016",
+            "type": "phrase",
+            "fields": [
+              "title"
+            ],
+           "analyzer": "my_ngram"
+          }
+	  }
+	  ]
+	}
+        }
+      ],"minimum_should_match": 1
+    }
+  },
+  "_source": [
+    "_id",
+    "title"
+  ],
+  "from": 0,
+  "size": 10,
+  "sort": [{
+      "publishtime": "desc"
+    }]
+}
+
+*/
+//"2016+智能+办公,"河南+工商"
+//["2016+智能+办公","河南+工商"]
+//QStr = `{"query":{"bool":{should":[$or],"minimum_should_match" : 1}}}`
+//{"bool":{"must":[]}}
+//{"multi_match": {"query": "$word","type": "phrase", "fields": [$field],"analyzer": "my_ngram"}}
+//"highlight": {"pre_tags": [""],"post_tags": [""],"fields": {"detail": {"fragment_size": 1,"number_of_fragments": 1},"title": {"fragment_size": 1,"number_of_fragments": 1}}}
+const (
+	//the final } is intentionally missing here; slicing NgramStr with [1:] leaves exactly one extra } to balance it
+	FilterQuery     = `{"query": {"filtered": {"filter": {"bool": {"must": [%s]}},%s}}`
+	NgramStr        = `{"query":{"bool":{"must":[%s],"should":[%s],"minimum_should_match": 1}}}`
+	NgramMust       = `{"bool":{"must":[%s]}}`
+	NgramMustAndNot = `{"bool":{"must":[%s],"must_not":[%s]}}`
+	minq            = `{"multi_match": {"query": "%s","type": "phrase", "fields": [%s]}}`
+	HL              = `"highlight": {"pre_tags": [""],"post_tags": [""],"fields": {%s}}`
+	highlightStr    = `%s: {"fragment_size": %d,"number_of_fragments": 1}`
+
+	FilterQuery_New  = `{"query":{"bool":{"must": [%s%s%s],"should":[]}}}`
+	MatchQueryString = `{"match": {%s: { "query":"%s", "operator": "and"}}}`
+	HL_New           = `"highlight": {"pre_tags": ["<HL>"],"post_tags": ["<HL>"],"fields": {%s}}`
+
+	//highlight markers for data queries, 2019-07-10
+	HL_MP = `"highlight": {"pre_tags": ["<HL>"],"post_tags": ["</HL>"],"fields": {%s}}`
+
+	ik_highlightStr = `%s: {"fragment_size": %d,"number_of_fragments": 1,"require_field_match": true}`
+	IK_pre_tags     = `<font class=\"es-highlight\">`
+	IK_post_tags    = `</font>`
+	HL_IK           = `"highlight": {"pre_tags": ["` + IK_pre_tags + `"],"post_tags": ["` + IK_post_tags + `"],"fields": {%s}}`
+)
+
+//escapes the " character
+func GetNgramQuery(query interface{}, mustquery, findfields string) (qstr string) {
+	var words []string
+	if q, ok := query.(string); ok {
+		if q != "" {
+			words = strings.Split(q, ",")
+		}
+	} else if q, ok := query.([]string); ok {
+		words = q
+	} else if q, ok := query.([]interface{}); ok {
+		words = objArrToStringArr(q)
+	}
+	if words != nil {
+		new_minq := fmt.Sprintf(minq, "%s", findfields)
+		musts := []string{}
+		for _, qs_words := range words {
+			qws := strings.Split(qs_words, "+")
+			mq := []string{}
+			for _, qs_word := range qws {
+				mq = append(mq, fmt.Sprintf(new_minq, ReplaceYH(qs_word)))
+			}
+			musts = append(musts, fmt.Sprintf(NgramMust, strings.Join(mq, ",")))
+		}
+		qstr = fmt.Sprintf(NgramStr, mustquery, strings.Join(musts, ","))
+		//log.Println("ngram-query", qstr)
+	} else {
+		qstr = fmt.Sprintf(NgramStr, mustquery, "")
+	}
+	return
+}
+
+func GetNgramQuery_New(querystring, querymust interface{}, must, findfields string) (qstring string) {
+	querymust_string := ""
+	var wordsMust []string
+	if q, ok := querymust.(string); ok {
+		if q != "" {
+			wordsMust = strings.Split(q, ",")
+		}
+	} else if q, ok := querymust.([]string); ok {
+		wordsMust = q
+	} else if q, ok := querymust.([]interface{}); ok {
+		wordsMust = objArrToStringArr(q)
+	}
+	if wordsMust != nil {
+		new_minq := fmt.Sprintf(minq, "%s", findfields)
+		musts := []string{}
+		for _, qs_wordsMust := range wordsMust {
+			qws := strings.Split(qs_wordsMust, "+")
+			mq := []string{}
+			for _, qs_word := range qws {
+				mq = append(mq, fmt.Sprintf(new_minq, qs_word))
+			}
+			musts = append(musts, fmt.Sprintf(NgramMust, strings.Join(mq, ",")))
+		}
+		querymust_string = strings.Join(musts, ",")
+	}
+	//log.Println("must", must, querymust_string)
+
+	//querystring---------------------------------------------
+	query_string := ""
+	var querysShold []string
+	if q, ok := querystring.(string); ok {
+		if q != "" {
+			querysShold = strings.Split(q, ",")
+		}
+	} else if q, ok := querystring.([]string); ok {
+		querysShold = q
+	} else if q, ok := querystring.([]interface{}); ok {
+		querysShold = objArrToStringArr(q)
+	}
+	if querysShold != nil {
+		for k, name := range strings.Split(findfields, ",") {
+			for _, qs_querysShold := range querysShold {
+				if k > 0 {
+					query_string = query_string + "," + fmt.Sprintf(MatchQueryString, fmt.Sprint(name), qs_querysShold)
+				} else {
+					query_string = query_string + fmt.Sprintf(MatchQueryString, fmt.Sprint(name), qs_querysShold)
+				}
+			}
+		}
+	}
+	//log.Println("querystring", query_string)
+	if querymust_string == "" {
+		qstring = fmt.Sprintf(FilterQuery_New, must, query_string, querymust_string)
+	} else {
+		qstring = fmt.Sprintf(FilterQuery_New, must, query_string, ","+querymust_string)
+	}
+	return
+}
+func GetByNgram(index, itype string, query interface{}, mustquery, findfields, order, fields string, start, limit int) *[]map[string]interface{} {
+	return GetByNgramAll(index, itype, query, mustquery, findfields, order, fields, start, limit, false, false)
+}
+
+//adds highlighting, filter query, and highlight fragment length
+func GetByNgramOther(index, itype string, query interface{}, mustquery, findfields, order, fields string, start, limit int, highlight bool, filtermode bool, count int) *[]map[string]interface{} {
+	defer catch()
+	qstr := ""
+	if mustquery != "" && filtermode {
+		qstr = GetNgramQuery(query, "", findfields)
+		qstr = fmt.Sprintf(FilterQuery, mustquery, qstr[1:])
+	} else {
+		qstr = GetNgramQuery(query, mustquery, findfields)
+	}
+	if qstr != "" {
+		if highlight {
+			ws := []string{}
+			for _, w := range strings.Split(findfields, ",") {
+				ws = append(ws, fmt.Sprintf(highlightStr, w, count))
+			}
+			qstr = qstr[:len(qstr)-1] + `,` + fmt.Sprintf(HL, strings.Join(ws, ",")) + `}`
+		}
+		if len(fields) > 0 {
+			qstr = qstr[:len(qstr)-1] + `,"_source":[` + fields + "]}"
+		}
+		if len(order) > 0 {
+			qstr = qstr[:len(qstr)-1] + `,"sort":[` + SR(SR(SR(SR(order, ",", "},{", -1), " ", "", -1), ":-1", `:"desc"`, -1), ":1", `:"asc"`, -1) + `]}`
+		}
+		if start > -1 {
+			qstr = qstr[:len(qstr)-1] + `,"from":` + strconv.Itoa(start) + `,"size":` + strconv.Itoa(limit) + "}"
+		}
+		//log.Println("ngram-find", qstr)
+		return Get(index, itype, qstr)
+	} else {
+		return nil
+	}
+}
+
+//adds highlighting and filter query
+//escapes the " character
+func GetByNgramAll(index, itype string, query interface{}, mustquery, findfields, order, fields string, start, limit int, highlight bool, filtermode bool) *[]map[string]interface{} {
+	defer catch()
+	qstr := ""
+	if mustquery != "" && filtermode {
+		qstr = GetNgramQuery(query, "", findfields)
+		qstr = fmt.Sprintf(FilterQuery, mustquery, qstr[1:])
+	} else {
+		qstr = GetNgramQuery(query, mustquery, findfields)
+	}
+	if qstr != "" {
+		if highlight {
+			ws := []string{}
+			for _, w := range strings.Split(findfields, ",") {
+				ws = append(ws, fmt.Sprintf(highlightStr, w, 1))
+			}
+			qstr = qstr[:len(qstr)-1] + `,` + fmt.Sprintf(HL, strings.Join(ws, ",")) + `}`
+		}
+		if len(fields) > 0 {
+			qstr = qstr[:len(qstr)-1] + `,"_source":[` + fields + "]}"
+		}
+		if strings.HasPrefix(order, "CUSTOM_") {
+			qstr = qstr[:len(qstr)-1] + `,` + strings.TrimLeft(order, "CUSTOM_") + `}`
+		} else if len(order) > 0 {
+			qstr = qstr[:len(qstr)-1] + `,"sort":[` + SR(SR(SR(SR(order, ",", "},{", -1), " ", "", -1), ":-1", `:"desc"`, -1), ":1", `:"asc"`, -1) + `]}`
+		}
+		if start > -1 {
+			qstr = qstr[:len(qstr)-1] + `,"from":` + strconv.Itoa(start) + `,"size":` + strconv.Itoa(limit) + "}"
+		}
+		//log.Println("ngram-find", qstr)
+		return Get(index, itype, qstr)
+	} else {
+		return nil
+	}
+}
+
+//adds highlighting and filter query
+func GetByNgramAll_New(index, itype string, querystring, querymust interface{}, mustquery, findfields, order, fields string, start, limit int, highlight bool, filtermode bool) *[]map[string]interface{} {
+	defer catch()
+	qstr := ""
+	if filtermode {
+		qstr = GetNgramQuery_New(querystring, querymust, mustquery, findfields)
+	} else {
+		qstr = GetNgramQuery_New(querystring, "", mustquery, findfields)
+	}
+	if qstr != "" {
+		if highlight {
+			ws := []string{}
+			for _, w := range strings.Split(findfields, ",") {
+				ws = append(ws, w+`:{"force_source": true}`)
+			}
+			qstr = qstr[:len(qstr)-1] + `,` + fmt.Sprintf(HL_New, strings.Join(ws, ",")) + `}`
+		}
+		if len(fields) > 0 {
+			qstr = qstr[:len(qstr)-1] + `,"_source":[` + fields + "]}"
+		}
+		if len(order) > 0 {
+			qstr = qstr[:len(qstr)-1] + `,"sort":[` + SR(SR(SR(SR(order, ",", ",", -1), " ", "", -1), ":-1", `:"desc"`, -1), ":1", `:"asc"`, -1) + `]}`
+		}
+		if start > -1 {
+			qstr = qstr[:len(qstr)-1] + `,"from":` + strconv.Itoa(start) + `,"size":` + strconv.Itoa(limit) + "}"
+		}
+		//log.Println("ngram-find", order, qstr)
+		return Get(index, itype, qstr)
+	} else {
+		return nil
+	}
+}
+
+type KeyConfig struct {
+	Keys      []string `json:"key"`
+	NotKeys   []string `json:"notkey"`
+	InfoTypes []string `json:"infotype"`
+	Areas     []string `json:"area"`
+}
+
+//escapes the " character
+func GetResForJY(index, itype string, keys []KeyConfig, allquery, findfields, SortQuery, fields string, start, limit int) *[]map[string]interface{} {
+	if len(keys) > 0 {
+		qstr := ""
+		new_minq := fmt.Sprintf(minq, "%s", findfields)
+	not_new_minq := fmt.Sprintf(minq, "%s", findfields) //excluded words are matched against the title only
+		musts := []string{}
+		for _, qs_words := range keys {
+			mq := []string{}
+			notmq := []string{}
+			for _, qs_word := range qs_words.Keys {
+				mq = append(mq, fmt.Sprintf(new_minq, ReplaceYH(qs_word)))
+				/*
+					qs := AnalyzerWord("bidding", qs_word)
+					for _, qw := range qs {
+						mq = append(mq, fmt.Sprintf(new_minq, ReplaceYH(qw)))
+					}
+				*/
+			}
+			for _, qs_word := range qs_words.NotKeys {
+				notmq = append(notmq, fmt.Sprintf(not_new_minq, ReplaceYH(qs_word)))
+			}
+			if len(qs_words.Areas) > 0 {
+				mq = append(mq, fmt.Sprintf(`{"terms":{"area":["%s"]}}`, strings.Join(qs_words.Areas, `","`)))
+			}
+			if len(qs_words.InfoTypes) > 0 {
+				mq = append(mq, fmt.Sprintf(`{"terms":{"toptype":["%s"]}}`, strings.Join(qs_words.InfoTypes, `","`)))
+			}
+			musts = append(musts, fmt.Sprintf(NgramMustAndNot, strings.Join(mq, ","), strings.Join(notmq, ",")))
+		}
+		qstr = fmt.Sprintf(NgramStr, "", strings.Join(musts, ","))
+
+		qstr = fmt.Sprintf(FilterQuery, allquery, qstr[1:])
+		ws := []string{}
+		for _, w := range strings.Split(findfields, ",") {
+			ws = append(ws, fmt.Sprintf(highlightStr, w, 1))
+		}
+		qstr = qstr[:len(qstr)-1] + `,` + fmt.Sprintf(HL, strings.Join(ws, ",")) + `}`
+		if len(fields) > 0 {
+			qstr = qstr[:len(qstr)-1] + `,"_source":[` + fields + "]}"
+		}
+		if len(SortQuery) > 0 {
+			qstr = qstr[:len(qstr)-1] + `,"sort":` + SortQuery + `}`
+		}
+		if start > -1 {
+			qstr = qstr[:len(qstr)-1] + `,"from":` + strconv.Itoa(start) + `,"size":` + strconv.Itoa(limit) + "}"
+		}
+		//log.Println("jy-ngram-find", qstr)
+		return Get(index, itype, qstr)
+	} else {
+		return nil
+	}
+}
+
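A hedged sketch of driving `GetResForJY` with a `KeyConfig` subscription; the keywords, areas and query values below are invented for illustration, only the signature comes from this commit:

```go
package elastic // sketch

func sketchGetResForJY() {
	keys := []KeyConfig{{
		Keys:      []string{"医院", "设备"}, // placeholder keywords
		NotKeys:   []string{"废标"},         // placeholder exclusion words
		InfoTypes: []string{"招标"},         // matched against toptype
		Areas:     []string{"河南"},         // matched against area
	}}
	res := GetResForJY(
		"bidding_v1", "bidding", keys,
		`{"range":{"publishtime":{"gte":1500000000}}}`, // allquery: extra filter clause
		`"title"`,                  // findfields: matched and highlighted
		`[{"publishtime":"desc"}]`, // SortQuery
		`"title","area","toptype"`, // _source fields
		0, 20,
	)
	_ = res
}
```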
+func ReplaceYH(src string) (rpl string) {
+	return strings.Replace(src, `"`, `\"`, -1)
+}
+
+// GetAllByNgram appends highlight, _source filtering, sort and paging to a prebuilt query string.
+func GetAllByNgram(index, itype, qstr, findfields, order, fields string, start, limit, count int, highlight bool) *[]map[string]interface{} {
+	if qstr != "" {
+		if highlight {
+			ws := []string{}
+			for _, w := range strings.Split(findfields, ",") {
+				ws = append(ws, fmt.Sprintf(highlightStr, w, count))
+			}
+			qstr = qstr[:len(qstr)-1] + `,` + fmt.Sprintf(HL, strings.Join(ws, ",")) + `}`
+		}
+		if len(fields) > 0 {
+			qstr = qstr[:len(qstr)-1] + `,"_source":[` + fields + "]}"
+		}
+		if len(order) > 0 {
+			qstr = qstr[:len(qstr)-1] + `,"sort":[` + SR(SR(SR(SR(order, ",", "},{", -1), " ", "", -1), ":-1", `:"desc"`, -1), ":1", `:"asc"`, -1) + `]}`
+		}
+		if start > -1 {
+			qstr = qstr[:len(qstr)-1] + `,"from":` + strconv.Itoa(start) + `,"size":` + strconv.Itoa(limit) + "}"
+		}
+		log.Println("GetAllByNgram:", qstr)
+		return Get(index, itype, qstr)
+	} else {
+		return nil
+	}
+}
+
+// Data tagging, 2019-07-10
+func GetAllByNgram_MP(index, itype, qstr, findfields, order, fields string, start, limit, count int, highlight bool) *[]map[string]interface{} {
+	if qstr != "" {
+		if highlight {
+			ws := []string{}
+			for _, w := range strings.Split(findfields, ",") {
+				ws = append(ws, fmt.Sprintf(highlightStr, w, count))
+			}
+			qstr = qstr[:len(qstr)-1] + `,` + fmt.Sprintf(HL_MP, strings.Join(ws, ",")) + `}`
+		}
+		if len(fields) > 0 {
+			qstr = qstr[:len(qstr)-1] + `,"_source":[` + fields + "]}"
+		}
+		if len(order) > 0 {
+			qstr = qstr[:len(qstr)-1] + `,"sort":[` + SR(SR(SR(SR(order, ",", "},{", -1), " ", "", -1), ":-1", `:"desc"`, -1), ":1", `:"asc"`, -1) + `]}`
+		}
+		if start > -1 {
+			qstr = qstr[:len(qstr)-1] + `,"from":` + strconv.Itoa(start) + `,"size":` + strconv.Itoa(limit) + "}"
+		}
+		//		log.Println("GetAllByNgram:", qstr)
+		return Get(index, itype, qstr)
+	} else {
+		return nil
+	}
+}
+
+// ik word segmentation
+func GetAllByIk(index, itype, qstr, findfields, order, fields string, start, limit, count int, highlight bool) *[]map[string]interface{} {
+	if qstr != "" {
+		if highlight {
+			ws := []string{}
+			for _, w := range strings.Split(findfields, ",") {
+				ws = append(ws, fmt.Sprintf(ik_highlightStr, w, count))
+			}
+			qstr = qstr[:len(qstr)-1] + `,` + fmt.Sprintf(HL_IK, strings.Join(ws, ",")) + `}`
+		}
+		if len(fields) > 0 {
+			qstr = qstr[:len(qstr)-1] + `,"_source":[` + fields + "]}"
+		}
+		if len(order) > 0 {
+			qstr = qstr[:len(qstr)-1] + `,"sort":[` + SR(SR(SR(SR(order, ",", "},{", -1), " ", "", -1), ":-1", `:"desc"`, -1), ":1", `:"asc"`, -1) + `]}`
+		}
+		if start > -1 {
+			qstr = qstr[:len(qstr)-1] + `,"from":` + strconv.Itoa(start) + `,"size":` + strconv.Itoa(limit) + "}"
+		}
+		//log.Println("GetAllByNgram:", qstr)
+		return Get(index, itype, qstr)
+	} else {
+		return nil
+	}
+}
+
+// Tokenize a word with the index's analyzer
+func AnalyzerWord(index, word string) (result []string) {
+	client := GetEsConn()
+	defer DestoryEsConn(client)
+	result = []string{}
+	p := url.Values{}
+	p["text"] = []string{word}
+	p["analyzer"] = []string{"ik"}
+	by, err := client.PerformRequest("GET", "/"+index+"/_analyze", p, nil)
+	if err != nil {
+		log.Println("AnalyzerWord Error:", err)
+		return
+	}
+	b, err := by.Body.MarshalJSON()
+	if err != nil {
+		log.Println("AnalyzerWord MarshalJSON Error:", err)
+		return
+	}
+	var res map[string][]map[string]interface{}
+	err = json.Unmarshal(b, &res)
+	if err != nil {
+		log.Println("AnalyzerWord Unmarshal Error:", err)
+		return
+	}
+	if res == nil {
+		return
+	}
+	for _, v := range res["tokens"] {
+		token, _ := v["token"].(string)
+		if token != "" {
+			result = append(result, token)
+		}
+	}
+	return
+}
+
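`AnalyzerWord` calls the index's `_analyze` endpoint with the ik analyzer and returns the tokens. A minimal sketch (the address is taken from `Test_word` below; the index name is an assumption):

```go
package elastic // sketch

import "log"

func sketchAnalyzerWord() {
	InitElasticSize("http://192.168.3.207:9700", 2)
	tokens := AnalyzerWord("bidding", "中华人民共和国") // index name is assumed
	log.Println(tokens)
}
```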
+func GetResForJYView(index, itype string, keys []KeyConfig, allquery, findfields, SortQuery, fields string, start, limit int) *[]map[string]interface{} {
+	if len(keys) > 0 {
+		qstr := ""
+		new_minq := fmt.Sprintf(minq, "%s", findfields)
+		not_new_minq := fmt.Sprintf(minq, "%s", findfields) // exclusion words: only the title is queried
+		musts := []string{}
+		for _, qs_words := range keys {
+			mq := []string{}
+			notmq := []string{}
+			for _, qs_word := range qs_words.Keys {
+				mq = append(mq, fmt.Sprintf(new_minq, ReplaceYH(qs_word)))
+			}
+			for _, qs_word := range qs_words.NotKeys {
+				notmq = append(notmq, fmt.Sprintf(not_new_minq, ReplaceYH(qs_word)))
+			}
+			if len(qs_words.Areas) > 0 {
+				mq = append(mq, fmt.Sprintf(`{"terms":{"area":["%s"]}}`, strings.Join(qs_words.Areas, `","`)))
+			}
+			if len(qs_words.InfoTypes) > 0 {
+				mq = append(mq, fmt.Sprintf(`{"terms":{"toptype":["%s"]}}`, strings.Join(qs_words.InfoTypes, `","`)))
+			}
+			musts = append(musts, fmt.Sprintf(NgramMustAndNot, strings.Join(mq, ","), strings.Join(notmq, ",")))
+		}
+		qstr = fmt.Sprintf(NgramStr, "", strings.Join(musts, ","))
+
+		qstr = fmt.Sprintf(FilterQuery, allquery, qstr[1:])
+		ws := []string{}
+		for _, w := range strings.Split(findfields, ",") {
+			ws = append(ws, fmt.Sprintf(highlightStr, w, 1))
+		}
+		qstr = qstr[:len(qstr)-1] + `,` + fmt.Sprintf(HL, strings.Join(ws, ",")) + `}`
+		if len(fields) > 0 {
+			qstr = qstr[:len(qstr)-1] + `,"_source":[` + fields + "]}"
+		}
+		if len(SortQuery) > 0 {
+			qstr = qstr[:len(qstr)-1] + `,"sort":` + SortQuery + `}`
+		}
+		if start > -1 {
+			qstr = qstr[:len(qstr)-1] + `,"from":` + strconv.Itoa(start) + `,"size":` + strconv.Itoa(limit) + "}"
+		}
+		return Get(index, itype, qstr)
+	} else {
+		return nil
+	}
+}

+ 99 - 0
sword_base/elastic/elasticutil_Index_test.go

@@ -0,0 +1,99 @@
+package elastic
+
+import (
+	"encoding/json"
+	"log"
+	_ "log"
+	"net/url"
+
+	_ "gopkg.in/mgo.v2/bson"
+	_ "sfbase/olivere/elastic.v1"
+
+	"qfw/util/mongodb"
+	"testing"
+)
+
+func Test_DelIndex(t *testing.T) {
+	InitElastic("http://192.168.3.14:9800")
+	client := GetEsConn()
+	client.DeleteIndex("zcfg_v1").Do()
+	//client.c("qfw", "qfw2").Do()
+	//l1 := time.Now().Unix()
+
+	//log.Println(util.LongToDate(l1, true))
+	DestoryEsConn(client)
+}
+func Test_createMapping(t *testing.T) {
+	InitElastic("http://192.168.3.14:9800")
+	client := GetEsConn()
+	//client.DeleteIndex("qfw").Do()
+	var testMapping = `{"mappings":{
+		"enterprise":{
+			"_id" : {
+        	  "path" : "_id"
+        	},
+			"properties":{
+				"EntName" : {
+            		"type" : "string",
+            		"analyzer" : "ik"
+        		  }
+				}
+			}
+		}
+	}`
+	_ = testMapping
+
+	createIndex2, err := client.CreateIndex("qfw2").Do()
+	log.Println(createIndex2, err)
+	DestoryEsConn(client)
+}
+
+func Test_saveMongodbToEs(t *testing.T) {
+	mongodb.InitMongodbPool(1, "192.168.3.14:27080", "qfw")
+	InitElastic("http://192.168.3.14:9800")
+	res := mongodb.Find("content", `{}`, nil, nil, false, -1, -1)
+	log.Println(len(*res))
+	BulkSave("content_v2", "content", res, false)
+}
+
+func Test_saveContentMongodbToEs(t *testing.T) {
+	mongodb.InitMongodbPool(1, "192.168.3.18:27080", "qfw")
+	InitElastic("http://192.168.3.18:9800")
+
+	res := mongodb.Find("enterprise", `{}`, nil, nil, false, -1, -1)
+	log.Println(len(*res))
+	BulkSave("enterprise", "enterprise", res, false)
+}
+
+func Test_find1(t *testing.T) {
+	InitElastic("http://192.168.3.14:9800")
+	log.Println(GetPage("qfw_back", "enterprise", `{"RegOrgName":"中心"}`, "", `"RegOrgName"`, 1, 2))
+}
+
+func Test_Convert(t *testing.T) {
+	mongodb.InitMongodbPool(1, "192.168.3.14:27080", "qfw")
+	//InitElastic("http://192.168.3.14:9800")
+	log.Println(ConverData(mongodb.FindOne("enterprise", `{"_id":"556db9f0c2e8753072b52ee9"}`)))
+}
+
+func Test_saveToEs(t *testing.T) {
+	InitElasticSize("http://192.168.3.18:9800", 2)
+	Save("bidlibrary", "bidlibrary", map[string]interface{}{
+		"_id":   "556db9f0c2e8753072b52ee5",
+		"title": "你好,试试2",
+	})
+}
+
+func Test_word(t *testing.T) {
+	InitElasticSize("http://192.168.3.207:9700", 2)
+	es := GetEsConn()
+	defer DestoryEsConn(es)
+	p := url.Values{}
+	p["text"] = []string{"中华人民 共和国"}
+	p["analyzer"] = []string{"ik"}
+	by, err := es.PerformRequest("GET", "/test1/_analyze", p, nil)
+	b, _ := by.Body.MarshalJSON()
+	var res map[string]interface{}
+	json.Unmarshal(b, &res)
+	log.Println(res, err)
+}

+ 40 - 0
sword_base/elastic/elasticutil_senior_test.go

@@ -0,0 +1,40 @@
+package elastic
+
+import (
+	"encoding/json"
+	"log"
+	"testing"
+)
+
+func Test_findMatch(t *testing.T) {
+	InitElastic("http://192.168.3.14:9800")
+	// exact phrase query
+	log.Println(Get("qfw", "enterprise", `{"query":{"match_phrase":{"EntName":"博爱县海江"}},"_source":["EntName"]}`))
+	// exact match on an unanalyzed field via term
+	log.Println(Get("qfw", "enterprise", `{"query":{"term":{"EntType":"9100"}},"_source":["EntName","EntType"],"from":0,"size":2}`))
+	log.Println(Get("qfw", "enterprise", `{"query":{"term":{"ID":"141000011003678984"}},"_source":["EntName","ID"],"from":0,"size":2}`))
+	log.Println(",,,,", Get("qfw", "enterprise", `{"query":{"filtered":{"filter":{"terms":{"_id":{"index":"qfw","type":"service","id":"555adf797b793b3b52061ba9","path":"s_enterpriseid"}}}}},"_source":["EntName","OpStateName","_id","s_enterpriseid"]}`))
+
+	log.Println("id", GetById("qfw", "enterprise", "555446f6120d8f654414a81f", "555446f7120d8f654414a847"))
+
+	log.Println(",,22,,", GetPage("qfw", "enterprise", `{"_id":{"$in":["555446f6120d8f654414a81f", "555446f7120d8f654414a847"]}}`, "", `"EntName"`, 0, 5))
+
+	client := GetEsConn()
+	searchResult, _ := client.Search().Index("qfw").Indices("enterprise", "service").Source(`{"query":{"term":{"EntType":"9100"}},"_source":["EntName","OpStateName","_id","s_enterpriseid"]}`).Do()
+	var res []map[string]interface{}
+	if searchResult.Hits != nil {
+		resNum := len(searchResult.Hits.Hits)
+		if resNum < 5000 {
+			res = make([]map[string]interface{}, resNum)
+			for i, hit := range searchResult.Hits.Hits {
+				json.Unmarshal(*hit.Source, &res[i])
+			}
+		} else {
+			log.Println("查询结果太多,查询到:", resNum, "条")
+		}
+
+	}
+	log.Println(&res)
+	log.Println(Get("qfw", "enterprise", `{"query":{"prefix":{"OpLocDistrict":"41"}},"_source":["OpLocDistrict","EntName"]}`))
+	//log.Println(GetPage("qfw", "enterprise", `{"OpLocDistrict":"410802"}`, "", `"EntName"`, 0, 100))
+}

+ 76 - 0
sword_base/elastic/elasticutil_tcp_test.go

@@ -0,0 +1,76 @@
+package elastic
+
+import (
+	"log"
+	"testing"
+)
+
+func Test_tcp(t *testing.T) {
+	InitElastic("http://192.168.3.14:9800")
+	b := make(chan bool, 1)
+	c := make(chan bool, 300)
+	for i := 0; i < 100000; i++ {
+		c <- true
+		go func(a int) {
+			/**
+			defer func() {
+				if r := recover(); r != nil {
+					log.Println("[E]", r)
+					for skip := 1; ; skip++ {
+						_, file, line, ok := runtime.Caller(skip)
+						if !ok {
+							break
+						}
+						go log.Printf("%v,%v\n", file, line)
+					}
+				}
+			}()
+			client := http.Client{
+				Transport: &http.Transport{
+					Dial: func(netw, addr string) (net.Conn, error) {
+						deadline := time.Now().Add(time.Duration(5) * time.Second)
+						c, err := net.DialTimeout(netw, addr, time.Duration(5)*time.Second)
+						if err != nil {
+							return nil, err
+						}
+						c.SetDeadline(deadline)
+						return c, nil
+					},
+				},
+			}
+			res, e := client.Get("http://192.168.3.14:9800/content/content/555ee61691db0a1360d11518")
+			if nil != res {
+				if res.StatusCode != 200 {
+					log.Println(res.StatusCode)
+				}
+				bs, _ := ioutil.ReadAll(res.Body)
+				res.Body.Close()
+				log.Println(a, string(bs))
+
+			} else {
+				log.Println(i, "res is null", e)
+			}
+			**/
+			log.Println(a, len(*GetPage("content", "content", `{}`, "", "", 0, 20)))
+			<-c
+		}(i)
+	}
+
+	<-b
+}
+
+func Test_clientNum(t *testing.T) {
+	s := make(chan bool, 10)
+	InitElasticSize("http://192.168.3.14:9800", 20)
+	size := 1000
+	for i := 0; i < size; i++ {
+		go func(i int) {
+			log.Println(i, len(*GetPage("enterprise", "enterprise", "{}", "", "", 0, 12)))
+		}(i)
+	}
+	<-s
+}
+
+func Test_makeQue(t *testing.T) {
+	log.Println(MakeQuery("{}", "", "", 0, 12))
+}

+ 248 - 0
sword_base/elastic/elasticutil_test.go

@@ -0,0 +1,248 @@
+package elastic
+
+import (
+	"fmt"
+	"log"
+	"qfw/util/mongodb"
+	"testing"
+	"time"
+
+	"gopkg.in/mgo.v2/bson"
+	_ "gopkg.in/olivere/elastic.v1"
+)
+
+func Test_Find1(t *testing.T) {
+	InitElasticSize("http://192.168.3.18:9800", 5)
+	log.Println(Get("service_v1", "service", `{"query":{"bool":{"must":[],"must_not":[],"should":[],"minimum_should_match" : 1}},"_source":["_id","s_name"],"sort":[{"l_createdate":"desc"}]}`))
+}
+
+func Test_save(t *testing.T) {
+	InitElastic("http://192.168.3.14:9800")
+
+	//client.CreateIndex("test").Body(testMapping).Do()
+	bid := bson.NewObjectId()
+	map1 := map[string]interface{}{
+		"_id":  bid,
+		"name": "不知道,就是你们我的",
+		"age":  503,
+		"addr": "可就不呀,2245555555是不是5555",
+	}
+	b := Save("test", "test", &map1)
+
+	log.Println(b, bid, bid.String())
+	//log.Println("保存结果", b, DelById("test", "test", bid.String()))
+}
+
+func Test_GoQ(t *testing.T) {
+	InitElastic("http://192.168.3.18:9800")
+	// range queries are not supported inside the "$" style query
+	//res := GetPage("bidding", "bidding", `{"$or":[{"TERM_toptype":"拟建"},{"TERM_toptype":"结果"}],"extracttype":{"$gt":1},"TERM_area": "广东","$and":[{"TERM_area": "广东"}{"TERM_area": "广东"}]}`, `{"id":-1}`, `"title","toptype"`, 0, 50)
+	res := GetPage("bidding", "bidding", `{"$or":[{"TERM_toptype":"拟建"},{"TERM_toptype":"结果"}]}`, `{"publishtime":-1}`, `"title","toptype","publishtime"`, 0, 50)
+	log.Println(res)
+}
+
+func Test_get(t *testing.T) {
+	InitElastic("http://192.168.3.14:9800")
+	a := make(chan int, 1)
+	for i := 0; i < 1000; i++ {
+		go func(b int) {
+			log.Println(b, len(*GetPage("content", "content", `{}`, "", "", 0, 20)))
+		}(i)
+	}
+	<-a
+
+	//Q1 := `{"name":"*走*"}`
+	// supports mongodb-style $and, $or, $gt, $lt in the query
+	//Q1 := `{"age":{"$lt":11}}`
+	//log.Println("解析Q1,生成elastic所有的DSL,一般不直接调用:", AnalyQuery(Q1, "", QStr))
+
+	//log.Println("解析Q1、排序、显示字段,分页,生成elastic所有的DSL,一般不直接调用:", MakeQuery(Q1, `{"name":1,"age":-1}`, `"name","age"`, -1, 0))
+
+	//log.Println("分页查询,排序和显示字段,分页:", GetPage("test", "test", Q1, "", "", 0, 2))
+
+	// native query method: the DSL has to be written by hand
+	//s := Get("test", `{"query" : { "bool": { "must":[{"query_string":{"default_field":"name","query":"*"}}]}}}`)
+	//s := Get("test", "test", `{"query":{"match_all":{}}}`)
+	//log.Println("查找全部", s)
+}
+
+func Test_GetById(t *testing.T) {
+	InitElastic("http://192.168.3.14:9800")
+	log.Println("GetById,获取对象", GetById("test", "test", "1", "553ddbc7a75c550890000001"))
+}
+
+func Test_DelType(t *testing.T) {
+	InitElastic("http://192.168.3.14:9800")
+	//Del("qfw", "enterprise", MakeQuery(`{}`, "", "", -1, -1))
+	c := GetEsConn()
+	//c.Delete().Index("qfw").Type("enterprise").Do()
+	//c.Refresh("qfw")
+	c.Alias().Add("qfw", "qfw_back").Do()
+}
+
+func Test_Del(t *testing.T) {
+	InitElastic("http://192.168.3.14:9800")
+	/*
+		Save("test", "test", map[string]interface{}{
+			"_id":  291,
+			"name": "走啊走",
+			"name2": map[string]interface{}{
+				"aa": "完全不好使",
+				"bb": []int{1, 2, 3},
+				"cc": 29,
+			},
+			"age":  29,
+			"num":  112,
+			"addr": "可就不呀,224是不是",
+		})
+	*/
+	//log.Println(Get("test", "test", MakeQuery(`{"name2.aa":"0"}`, "", "", -1, -1)))
+	//log.Println("--:--", es.NewQueryStringQuery(`{"query":{"match":{"age":50}}}`).Query)
+	//log.Println("1:", MakeQuery(`{"_id":2}`, "", "", -1, -1))
+	//log.Println("Del方法,删除:", Del("test", "test", MakeQuery(`{"$and":[{"age":{"$gte":28}},{"age":{"$lt":30}}],"TERM_num":112,"name2.cc":29}`, "", "", -1, -1)))
+	//log.Println("Del方法,删除:", Del("test", "test", MakeQuery(`{}`, "", "", -1, -1)))
+	DelById("qfw", "content", "AU2P5rW95E6XJdgtD0Eh")
+}
+
+func Test_Update(t *testing.T) {
+	InitElastic("http://192.168.3.14:9800")
+	log.Println("更新结果", Update("test", "test", "9", `ctx._source.age=101;ctx._source.name="张三"`))
+}
+
+func Test_gg(t *testing.T) {
+	InitElastic("http://192.168.3.14:9800")
+	vv := Get("service", "service", `{"query":{"term":{"s_enterpriseid":"`+"555446f6120d8f654414a81f"+`"}},
+		_source:[`+`"_id"`+`],
+		sort:{"l_createdate":"desc"},
+		from:`+fmt.Sprintf("%v", 0)+`,
+		size:`+fmt.Sprintf("%v", 10)+`
+		}`)
+	log.Println(vv)
+}
+
+func Test_DelById(t *testing.T) {
+	InitElastic("http://192.168.3.14:9800")
+	//	Save("test", "test", map[string]interface{}{
+	//		"_id":  101,
+	//		"name": "走啊走",
+	//		"name2": map[string]interface{}{
+	//			"aa": "完全不好使",
+	//		},
+	//		"age":  220,
+	//		"addr": "可就不呀,224是不是",
+	//	})
+	log.Println("删除结果", DelById("test", "test", "559334fb91db0a21a400000a"))
+}
+
+// Delete first, then re-insert
+func Test_UpdateNewDoc(t *testing.T) {
+	InitElastic("http://192.168.3.14:9800")
+	log.Println("完整更新对象", UpdateNewDoc("service", "service", `{"_id":56497eb0bbb93e2070000002,"name5":"我是新对象3~","tt":{"name":"内嵌的name"}}`))
+}
+
+// Bulk save/update
+func Test_BulkSave(t *testing.T) {
+	InitElastic("http://192.168.3.14:9800")
+	//	res := []map[string]interface{}{
+	//		map[string]interface{}{
+	//			"_id":   51,
+	//			"name9": "map第11",
+	//		},
+	//		map[string]interface{}{
+	//			"_id":   52,
+	//			"name9": "map第22",
+	//		},
+	//		map[string]interface{}{
+	//			"_id":   53,
+	//			"name9": "map第33",
+	//		},
+	//	}
+	mongodb.InitMongodbPool(1, "192.168.3.14:27080", "qfw")
+	//	res := mongodb.Find("bidding", `{"infoformat":2}`, nil, nil, false, 0, 50)
+	//	for _, v := range *res {
+	//		Save("bidding", "bidding", v)
+	//	}
+	res := mongodb.FindById("bidding", "5833a0452d76c41b289b41f7", nil)
+	log.Println("res:", res)
+	Save("bidding", "bidding", res)
+	log.Println("批量保存对象:")
+	//BulkSave("test", "test", &res, true)
+}
+
+// Bulk-update fields
+func Test_BulkUpdate(t *testing.T) {
+	InitElastic("http://192.168.3.18:9800")
+	//regno 540192600031442,542422600000086
+	ids := []string{"55a85997a442ceca9e20de9f", "55a85997a442ceca9e20deaf"}
+	BulkUpdate("enterprise", "enterprise", ids, `ctx._source.Ycml=false`)
+}
+
+// Fetch data without the result-size limit
+func Test_getNoLimit(t *testing.T) {
+	InitElasticSize("http://192.168.3.18:9800", 1)
+	k := "EntType"
+	v := "9600"
+	query := `{"query": {"bool":{"must":[{"query_string":{"default_field":"` + k + `","query":"` + v + `"}},{"query_string":{"default_field":"Ycml","query":true}}]}},"_source": ["_id","EntName"],"from": 0,"size": 999999}`
+	nt := GetNoLimit("enterprise", "enterprise", query)
+	log.Println("总数", len(*nt))
+}
+func Test_Count(t *testing.T) {
+	log.Println("ESCOUNT", Count("test", "test", MakeQuery(`{"name":"你们","TERM_age":503}`, "", "", -1, -1)))
+}
+
+func Test_tmp(t *testing.T) {
+	InitElastic("http://192.168.3.14:9800")
+	DelById("qfw", "enterprise", "55633f50a442ab325175252b")
+}
+
+func Test_updateEntDoc(t *testing.T) {
+	InitElastic("http://192.168.3.14:9800")
+	mongodb.InitMongodbPool(1, "192.168.3.14:27080", "qfw")
+	UpdateEntDoc("556d858ec2e875307286f868")
+
+}
+func Test_first(t *testing.T) {
+	InitElasticSize("http://192.168.3.14:9800", 1)
+	nt := GetPage("content", "content", `{"releasetime": {"$lt": 1440773202}, "s_contenttype": "qykb"}`, `{"releasetime":-1}`, `{"s_title":1}`, 0, 2)
+	var i = 0
+	for _, v := range *nt {
+		log.Println("\n", v, i)
+		i++
+	}
+}
+
+func Test_date(t *testing.T) {
+	//"1448267541"
+	//s := time.Now().UnixNano()
+	//log.Println(s, time.Now().Unix(), fmt.Sprintf("%d", s)[4:12], 999999/60/60)
+	t1 := time.Now().Unix()
+	t2 := t1 + 10000
+
+	tt1 := time.Unix(t1, 0)
+	tt2 := time.Unix(t2, 0)
+
+	nt1 := time.Date(tt1.Year(), tt1.Month(), tt1.Day(), 0, 0, 0, 0, time.Local)
+	nt2 := time.Date(tt2.Year(), tt2.Month(), tt2.Day(), 0, 0, 0, 0, time.Local)
+
+	log.Println((nt2.Unix() - nt1.Unix()) / 86400)
+
+}
+
+func Test_Getpage(t *testing.T) {
+	InitElasticSize("http://192.168.3.18:9800", 1)
+	data := *GetPage("enterprise", "enterprise", `{"s_action":"01"}`, `{"l_submittime":-1}`, `"EntName","l_submittime","_id"`, 0, 8)
+	log.Println("data:", data)
+}
+
+func Test_q(t *testing.T) {
+	//log.Println(MakeQuery(`{"_id":"556349a6a442ab325177ade0"}`, "", `"_id"`, -1, -1))
+	//Count("enterprise","enterprise",)
+}
+
+func Test_bidding(t *testing.T) {
+	InitElasticSize("http://192.168.3.18:9800", 1)
+	//log.Println(Count("bidding_v1", "bidding", GetNgramQuery("河南+工商", `"title"`)))
+	log.Println(GetByNgramAll("bidding_v1", "bidding", "河南+郑州", `{"term":{"_id":"57aadfdf61a0721f1519c19d"}}`, `"title","detail"`, `{"publishtime":-1}`, `"title","_id"`, 0, 10, true, false))
+	//res := GetPage("bidding", "bidding", "", `{"comeintime":-1}`, `"_id"`, 0, 1)
+	//log.Println((*res)[0]["_id"])
+}

+ 50 - 0
sword_base/elastic/util.go

@@ -0,0 +1,50 @@
+package elastic
+
+import (
+	"encoding/json"
+	"log"
+	"runtime"
+	"strings"
+)
+
+// Converts obj (string or M) to an M; used when building queries.
+func objToMap(obj interface{}) *map[string]interface{} {
+	data := make(map[string]interface{})
+	if s, ok := obj.(string); ok {
+		json.Unmarshal([]byte(strings.Replace(s, "'", "\"", -1)), &data)
+	} else if s1, ok1 := obj.(map[string]interface{}); ok1 {
+		data = s1
+	} else if s1, ok1 := obj.(*map[string]interface{}); ok1 {
+		return s1
+	} else {
+		data = nil
+	}
+	return &data
+}
+
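`objToMap` accepts a JSON string (single quotes tolerated), a plain map, or a map pointer. A quick in-package sketch:

```go
package elastic // sketch: objToMap is unexported, so this lives in the same package

func sketchObjToMap() {
	a := objToMap(`{'name':'test'}`)                       // single quotes are normalized to "
	b := objToMap(map[string]interface{}{"name": "test"})  // a map is wrapped as-is
	c := objToMap(&map[string]interface{}{"name": "test"}) // a pointer is returned unchanged
	_, _, _ = a, b, c
}
```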
+// Converts an interface slice to a string slice.
+func objArrToStringArr(old []interface{}) []string {
+	if old != nil {
+		new := make([]string, len(old))
+		for i, v := range old {
+			new[i], _ = v.(string)
+		}
+		return new
+	} else {
+		return nil
+	}
+}
+
+// Panic recovery / error interception.
+func catch() {
+	if r := recover(); r != nil {
+		log.Println(r)
+		for skip := 0; ; skip++ {
+			_, file, line, ok := runtime.Caller(skip)
+			if !ok {
+				break
+			}
+			go log.Printf("%v,%v\n", file, line)
+		}
+	}
+}

+ 785 - 0
sword_base/mongodb/mongodb.go

@@ -0,0 +1,785 @@
+package mongodb
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"log"
+	"math/big"
+	"reflect"
+	"runtime"
+	"strconv"
+	"strings"
+	"time"
+
+	"go.mongodb.org/mongo-driver/bson"
+	"go.mongodb.org/mongo-driver/bson/primitive"
+	"go.mongodb.org/mongo-driver/mongo"
+	"go.mongodb.org/mongo-driver/mongo/options"
+)
+
+func NewMgo(addr, db string, size int) *MongodbSim {
+	mgo := &MongodbSim{
+		MongodbAddr: addr,
+		Size:        size,
+		DbName:      db,
+	}
+	mgo.InitPool()
+	return mgo
+}
+
+func NewMgoQyfw(addr, db string, size int, replSet string) *MongodbSim {
+	mgo := &MongodbSim{
+		MongodbAddr: addr,
+		Size:        size,
+		DbName:      db,
+	}
+	mgo.ReplSet = replSet
+	mgo.InitPool()
+	return mgo
+}
+
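A minimal sketch of building a pool with `NewMgo`; the address, database name and collection mirror the values used in mongodb_test.go and are placeholders here:

```go
package mongodb // sketch

import "log"

func sketchNewMgo() {
	m := NewMgo("192.168.3.128:27080", "qfw", 10) // placeholder address/db/pool size
	log.Println(m.Count("test", `{"name":"张三"}`))
}
```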
+type Bluk struct {
+	ms     *MgoSess
+	writes []mongo.WriteModel
+}
+
+func (b *Bluk) Insert(doc interface{}) {
+	write := mongo.NewInsertOneModel()
+	write.SetDocument(doc)
+	b.writes = append(b.writes, write)
+}
+func (b *Bluk) Update(doc ...interface{}) {
+	write := mongo.NewUpdateOneModel()
+	write.SetFilter(doc[0])
+	ue := ObjToM(doc[1])
+	autoUpdateTime(b.ms.db, b.ms.coll, ue)
+	write.SetUpdate(ue)
+	write.SetUpsert(false)
+	b.writes = append(b.writes, write)
+}
+func (b *Bluk) UpdateAll(doc ...interface{}) {
+	write := mongo.NewUpdateManyModel()
+	write.SetFilter(doc[0])
+	ue := ObjToM(doc[1])
+	autoUpdateTime(b.ms.db, b.ms.coll, ue)
+	write.SetUpdate(ue)
+	write.SetUpsert(false)
+	b.writes = append(b.writes, write)
+}
+func (b *Bluk) Upsert(doc ...interface{}) {
+	write := mongo.NewUpdateOneModel()
+	write.SetFilter(doc[0])
+	ue := ObjToM(doc[1])
+	autoUpdateTime(b.ms.db, b.ms.coll, ue)
+	write.SetUpdate(ue)
+	write.SetUpsert(true)
+	b.writes = append(b.writes, write)
+}
+func (b *Bluk) Remove(doc interface{}) {
+	write := mongo.NewDeleteOneModel()
+	write.SetFilter(doc)
+	b.writes = append(b.writes, write)
+}
+func (b *Bluk) RemoveAll(doc interface{}) {
+	write := mongo.NewDeleteManyModel()
+	write.SetFilter(doc)
+	b.writes = append(b.writes, write)
+}
+func (b *Bluk) Run() (*mongo.BulkWriteResult, error) {
+	return b.ms.M.C.Database(b.ms.db).Collection(b.ms.coll).BulkWrite(b.ms.M.Ctx, b.writes)
+}
+
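The `Bluk` helper queues WriteModels and flushes them with a single `BulkWrite`. A hedged sketch (collection and documents are illustrative):

```go
package mongodb // sketch

import "log"

func sketchBulk(m *MongodbSim) {
	sess := m.GetMgoConn()
	defer m.DestoryMongoConn(sess)
	bulk := sess.DB("qfw").C("test").Bulk()
	bulk.Insert(map[string]interface{}{"name": "doc1"})
	bulk.Upsert(
		map[string]interface{}{"name": "doc2"},                           // filter
		map[string]interface{}{"$set": map[string]interface{}{"age": 1}}, // update
	)
	if res, err := bulk.Run(); err == nil {
		log.Println(res.InsertedCount, res.UpsertedCount)
	}
}
```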
+// MgoIter wraps a mongo.Cursor for mgo-style iteration.
+type MgoIter struct {
+	Cursor *mongo.Cursor
+	Ctx    context.Context
+}
+
+func (mt *MgoIter) Next(result interface{}) bool {
+	if mt.Cursor != nil {
+		if mt.Cursor.Next(mt.Ctx) {
+			rType := reflect.TypeOf(result)
+			rVal := reflect.ValueOf(result)
+			if rType.Kind() == reflect.Ptr {
+				rType = rType.Elem()
+				rVal = rVal.Elem()
+			}
+			var err error
+			if rType.Kind() == reflect.Map {
+				r := make(map[string]interface{})
+				err = mt.Cursor.Decode(&r)
+				if rVal.CanSet() {
+					rVal.Set(reflect.ValueOf(r))
+				} else {
+					for it := rVal.MapRange(); it.Next(); {
+						rVal.SetMapIndex(it.Key(), reflect.Value{})
+					}
+					for it := reflect.ValueOf(r).MapRange(); it.Next(); {
+						rVal.SetMapIndex(it.Key(), it.Value())
+					}
+				}
+			} else {
+				err = mt.Cursor.Decode(&result)
+			}
+			if err != nil {
+				log.Println("mgo cur err", err.Error())
+				mt.Cursor.Close(mt.Ctx)
+				return false
+			}
+			return true
+		} else {
+			mt.Cursor.Close(mt.Ctx)
+			return false
+		}
+	} else {
+		return false
+	}
+}
+
+// MgoSess is a chainable, mgo-style session helper.
+type MgoSess struct {
+	db     string
+	coll   string
+	query  interface{}
+	sorts  []string
+	fields interface{}
+	limit  int64
+	skip   int64
+	pipe   []map[string]interface{}
+	all    interface{}
+	M      *MongodbSim
+}
+
+func (ms *MgoSess) DB(name string) *MgoSess {
+	ms.db = name
+	return ms
+}
+func (ms *MgoSess) C(name string) *MgoSess {
+	ms.coll = name
+	return ms
+}
+func (ms *MgoSess) Bulk() *Bluk {
+	return &Bluk{ms: ms}
+}
+func (ms *MgoSess) Find(q interface{}) *MgoSess {
+	if q == nil {
+		q = map[string]interface{}{}
+	}
+	ms.query = q
+	return ms
+}
+func (ms *MgoSess) FindId(_id interface{}) *MgoSess {
+	ms.query = map[string]interface{}{"_id": _id}
+	return ms
+}
+func (ms *MgoSess) Select(fields interface{}) *MgoSess {
+	ms.fields = fields
+	return ms
+}
+func (ms *MgoSess) Limit(limit int64) *MgoSess {
+	ms.limit = limit
+	return ms
+}
+func (ms *MgoSess) Skip(skip int64) *MgoSess {
+	ms.skip = skip
+	return ms
+}
+func (ms *MgoSess) Sort(sorts ...string) *MgoSess {
+	ms.sorts = sorts
+	return ms
+}
+func (ms *MgoSess) Pipe(p []map[string]interface{}) *MgoSess {
+	ms.pipe = p
+	return ms
+}
+func (ms *MgoSess) Insert(doc interface{}) error {
+	_, err := ms.M.C.Database(ms.db).Collection(ms.coll).InsertOne(ms.M.Ctx, doc)
+	return err
+}
+func (ms *MgoSess) Remove(filter interface{}) error {
+	_, err := ms.M.C.Database(ms.db).Collection(ms.coll).DeleteOne(ms.M.Ctx, filter)
+	return err
+}
+func (ms *MgoSess) RemoveId(_id interface{}) error {
+	_, err := ms.M.C.Database(ms.db).Collection(ms.coll).DeleteOne(ms.M.Ctx, map[string]interface{}{"_id": _id})
+	return err
+}
+func (ms *MgoSess) RemoveAll(filter interface{}) (*mongo.DeleteResult, error) {
+	return ms.M.C.Database(ms.db).Collection(ms.coll).DeleteMany(ms.M.Ctx, filter)
+}
+func (ms *MgoSess) Upsert(filter, update interface{}) (*mongo.UpdateResult, error) {
+	ct := options.Update()
+	ct.SetUpsert(true)
+	ue := ObjToM(update)
+	autoUpdateTime(ms.db, ms.coll, ue)
+	return ms.M.C.Database(ms.db).Collection(ms.coll).UpdateOne(ms.M.Ctx, filter, ue, ct)
+}
+func (ms *MgoSess) UpsertId(filter, update interface{}) (*mongo.UpdateResult, error) {
+	ct := options.Update()
+	ct.SetUpsert(true)
+	ue := ObjToM(update)
+	autoUpdateTime(ms.db, ms.coll, ue)
+	return ms.M.C.Database(ms.db).Collection(ms.coll).UpdateOne(ms.M.Ctx, map[string]interface{}{"_id": filter}, ue, ct)
+}
+func (ms *MgoSess) UpdateId(filter, update interface{}) error {
+	ue := ObjToM(update)
+	autoUpdateTime(ms.db, ms.coll, ue)
+	_, err := ms.M.C.Database(ms.db).Collection(ms.coll).UpdateOne(ms.M.Ctx, map[string]interface{}{"_id": filter}, ue)
+	return err
+}
+func (ms *MgoSess) Update(filter, update interface{}) error {
+	ue := ObjToM(update)
+	autoUpdateTime(ms.db, ms.coll, ue)
+	_, err := ms.M.C.Database(ms.db).Collection(ms.coll).UpdateOne(ms.M.Ctx, filter, ue)
+	return err
+}
+func (ms *MgoSess) Count() (int64, error) {
+	return ms.M.C.Database(ms.db).Collection(ms.coll).CountDocuments(ms.M.Ctx, ms.query)
+}
+func (ms *MgoSess) One(v *map[string]interface{}) {
+	of := options.FindOne()
+	of.SetProjection(ms.fields)
+	sr := ms.M.C.Database(ms.db).Collection(ms.coll).FindOne(ms.M.Ctx, ms.query, of)
+	if sr.Err() == nil {
+		sr.Decode(&v)
+	}
+}
+func (ms *MgoSess) All(v *[]map[string]interface{}) {
+	cur, err := ms.M.C.Database(ms.db).Collection(ms.coll).Aggregate(ms.M.Ctx, ms.pipe)
+	if err == nil && cur.Err() == nil {
+		cur.All(ms.M.Ctx, v)
+	}
+}
+func (ms *MgoSess) Iter() *MgoIter {
+	it := &MgoIter{}
+	find := options.Find()
+	if ms.skip > 0 {
+		find.SetSkip(ms.skip)
+	}
+	if ms.limit > 0 {
+		find.SetLimit(ms.limit)
+	}
+	find.SetBatchSize(100)
+	if len(ms.sorts) > 0 {
+		sort := bson.M{}
+		for _, k := range ms.sorts {
+			switch k[:1] {
+			case "-":
+				sort[k[1:]] = -1
+			case "+":
+				sort[k[1:]] = 1
+			default:
+				sort[k] = 1
+			}
+		}
+		find.SetSort(sort)
+	}
+	if ms.fields != nil {
+		find.SetProjection(ms.fields)
+	}
+	cur, err := ms.M.C.Database(ms.db).Collection(ms.coll).Find(ms.M.Ctx, ms.query, find)
+	if err != nil {
+		log.Println("mgo find err", err.Error())
+	} else {
+		it.Cursor = cur
+		it.Ctx = ms.M.Ctx
+	}
+	return it
+}
+
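`Find`/`Select`/`Sort`/`Iter` mirror mgo's chainable API, while `All` runs the aggregation pipeline set with `Pipe`. A sketch along the lines of `Test_itor` in the test file below (filter and projection fields are illustrative):

```go
package mongodb // sketch

import "log"

func sketchIter(m *MongodbSim) {
	sess := m.GetMgoConn()
	it := sess.DB("qfw").C("user").
		Find(map[string]interface{}{"i_status": 1}).  // filter
		Select(map[string]interface{}{"s_phone": 1}). // projection
		Sort("-_id").                                 // "-" = descending, otherwise ascending
		Iter()
	for row := make(map[string]interface{}); it.Next(&row); {
		log.Println(row["_id"])
	}
}
```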
+type MongodbSim struct {
+	MongodbAddr string
+	Size        int
+	//	MinSize     int
+	DbName   string
+	C        *mongo.Client
+	Ctx      context.Context
+	ShortCtx context.Context
+	pool     chan bool
+	UserName string
+	Password string
+	ReplSet  string
+}
+
+func (m *MongodbSim) GetMgoConn() *MgoSess {
+	//m.Open()
+	ms := &MgoSess{}
+	ms.M = m
+	return ms
+}
+
+func (m *MongodbSim) DestoryMongoConn(ms *MgoSess) {
+	//m.Close()
+	ms.M = nil
+	ms = nil
+}
+
+func (m *MongodbSim) Destroy() {
+	//m.Close()
+	m.C.Disconnect(nil)
+	m.C = nil
+}
+
+func (m *MongodbSim) InitPool() {
+	opts := options.Client()
+	registry := bson.NewRegistryBuilder().RegisterTypeMapEntry(bson.TypeArray, reflect.TypeOf([]interface{}{})).Build()
+	opts.SetRegistry(registry)
+	opts.SetConnectTimeout(3 * time.Second)
+	opts.SetHosts(strings.Split(m.MongodbAddr, ","))
+	//opts.ApplyURI("mongodb://" + m.MongodbAddr)
+	opts.SetMaxPoolSize(uint64(m.Size))
+	if m.UserName != "" && m.Password != "" {
+		cre := options.Credential{
+			Username: m.UserName,
+			Password: m.Password,
+		}
+		opts.SetAuth(cre)
+	}
+	/*ms := strings.Split(m.MongodbAddr, ",")
+	if m.ReplSet == "" && len(ms) > 1 {
+		m.ReplSet = "qfws"
+	}*/
+	if m.ReplSet != "" {
+		opts.SetReplicaSet(m.ReplSet)
+		opts.SetDirect(false)
+	}
+	m.pool = make(chan bool, m.Size)
+	opts.SetMaxConnIdleTime(2 * time.Hour)
+	m.Ctx, _ = context.WithTimeout(context.Background(), 99999*time.Hour)
+	m.ShortCtx, _ = context.WithTimeout(context.Background(), 1*time.Minute)
+	client, err := mongo.Connect(m.ShortCtx, opts)
+	if err != nil {
+		log.Println("mgo init error:", err.Error())
+	} else {
+		m.C = client
+	}
+}
+
+func (m *MongodbSim) Open() {
+	m.pool <- true
+}
+func (m *MongodbSim) Close() {
+	<-m.pool
+}
+
+func (m *MongodbSim) Save(c string, doc interface{}) string {
+	defer catch()
+	m.Open()
+	defer m.Close()
+	coll := m.C.Database(m.DbName).Collection(c)
+	obj := ObjToM(doc)
+	id := primitive.NewObjectID()
+	(*obj)["_id"] = id
+	_, err := coll.InsertOne(m.Ctx, obj)
+	if nil != err {
+		log.Println("SaveError", err)
+		return ""
+	}
+	return id.Hex()
+}
+
+// Inserts the document keeping its original _id.
+func (m *MongodbSim) SaveByOriID(c string, doc interface{}) bool {
+	defer catch()
+	m.Open()
+	defer m.Close()
+	coll := m.C.Database(m.DbName).Collection(c)
+	_, err := coll.InsertOne(m.Ctx, ObjToM(doc))
+	if nil != err {
+		log.Println("SaveByOriIDError", err)
+		return false
+	}
+	return true
+}
+
+// Bulk insert.
+func (m *MongodbSim) SaveBulk(c string, doc ...map[string]interface{}) bool {
+	defer catch()
+	m.Open()
+	defer m.Close()
+	coll := m.C.Database(m.DbName).Collection(c)
+	var writes []mongo.WriteModel
+	for _, d := range doc {
+		write := mongo.NewInsertOneModel()
+		write.SetDocument(d)
+		writes = append(writes, write)
+	}
+	br, e := coll.BulkWrite(m.Ctx, writes)
+	if e != nil {
+		b := strings.Index(e.Error(), "duplicate") > -1
+		log.Println("mgo savebulk error:", e.Error())
+		if br != nil {
+			log.Println("mgo savebulk size:", br.InsertedCount)
+		}
+		return b
+	}
+	return true
+}
+
+// Bulk insert (variadic interface form).
+func (m *MongodbSim) SaveBulkInterface(c string, doc ...interface{}) bool {
+	defer catch()
+	m.Open()
+	defer m.Close()
+	coll := m.C.Database(m.DbName).Collection(c)
+	var writes []mongo.WriteModel
+	for _, d := range doc {
+		write := mongo.NewInsertOneModel()
+		write.SetDocument(d)
+		writes = append(writes, write)
+	}
+	br, e := coll.BulkWrite(m.Ctx, writes)
+	if e != nil {
+		b := strings.Index(e.Error(), "duplicate") > -1
+		log.Println("mgo SaveBulkInterface error:", e.Error())
+		if br != nil {
+			log.Println("mgo SaveBulkInterface size:", br.InsertedCount)
+		}
+		return b
+	}
+	return true
+}
+
+// Counts documents matching a condition.
+func (m *MongodbSim) Count(c string, q interface{}) int {
+	r, _ := m.CountByErr(c, q)
+	return r
+}
+
+// Counts documents, returning any error.
+func (m *MongodbSim) CountByErr(c string, q interface{}) (int, error) {
+	defer catch()
+	m.Open()
+	defer m.Close()
+	res, err := m.C.Database(m.DbName).Collection(c).CountDocuments(m.Ctx, ObjToM(q))
+	if err != nil {
+		log.Println("统计错误", err.Error())
+		return 0, err
+	} else {
+		return int(res), nil
+	}
+}
+
+// Deletes by condition and returns the deleted count.
+func (m *MongodbSim) Delete(c string, q interface{}) int64 {
+	defer catch()
+	m.Open()
+	defer m.Close()
+	res, err := m.C.Database(m.DbName).Collection(c).DeleteMany(m.Ctx, ObjToM(q))
+	if err != nil {
+		log.Println("删除错误", err.Error())
+		if res == nil {
+			return 0
+		}
+	}
+	return res.DeletedCount
+}
+
+// Deletes matching documents.
+func (m *MongodbSim) Del(c string, q interface{}) bool {
+	defer catch()
+	m.Open()
+	defer m.Close()
+	_, err := m.C.Database(m.DbName).Collection(c).DeleteMany(m.Ctx, ObjToM(q))
+	if err != nil {
+		log.Println("删除错误", err.Error())
+		return false
+	}
+	return true
+}
+
+// Updates by condition.
+func (m *MongodbSim) Update(c string, q, u interface{}, upsert bool, multi bool) bool {
+	defer catch()
+	m.Open()
+	defer m.Close()
+	ct := options.Update()
+	if upsert {
+		ct.SetUpsert(true)
+	}
+	coll := m.C.Database(m.DbName).Collection(c)
+	ue := ObjToM(u)
+	autoUpdateTime(m.DbName, c, ue)
+	var err error
+	if multi {
+		_, err = coll.UpdateMany(m.Ctx, ObjToM(q), ue, ct)
+	} else {
+		_, err = coll.UpdateOne(m.Ctx, ObjToM(q), ue, ct)
+	}
+	if err != nil {
+		log.Println("更新错误", err.Error())
+		return false
+	}
+	return true
+}
+func (m *MongodbSim) UpdateById(c string, id interface{}, set interface{}) bool {
+	defer catch()
+	m.Open()
+	defer m.Close()
+	q := make(map[string]interface{})
+	if sid, ok := id.(string); ok {
+		q["_id"], _ = primitive.ObjectIDFromHex(sid)
+	} else {
+		q["_id"] = id
+	}
+	ue := ObjToM(set)
+	autoUpdateTime(m.DbName, c, ue)
+	_, err := m.C.Database(m.DbName).Collection(c).UpdateOne(m.Ctx, q, ue)
+	if nil != err {
+		log.Println("UpdateByIdError", err)
+		return false
+	}
+	return true
+}
+
+// Bulk update.
+func (m *MongodbSim) UpdateBulkAll(db, c string, doc ...[]map[string]interface{}) bool {
+	return m.NewUpdateBulk(db, c, false, false, doc...)
+}
+
+func (m *MongodbSim) UpdateBulk(c string, doc ...[]map[string]interface{}) bool {
+	return m.UpdateBulkAll(m.DbName, c, doc...)
+}
+
+// Bulk upsert.
+func (m *MongodbSim) UpSertBulk(c string, doc ...[]map[string]interface{}) bool {
+	return m.NewUpdateBulk(m.DbName, c, true, false, doc...)
+}
+
+// Bulk upsert with configurable upsert/multi flags.
+func (m *MongodbSim) UpSertMultiBulk(c string, upsert, multi bool, doc ...[]map[string]interface{}) bool {
+	return m.NewUpdateBulk(m.DbName, c, upsert, multi, doc...)
+}
+
+// Bulk update/upsert implementation.
+func (m *MongodbSim) NewUpdateBulk(db, c string, upsert, multi bool, doc ...[]map[string]interface{}) bool {
+	defer catch()
+	m.Open()
+	defer m.Close()
+	coll := m.C.Database(db).Collection(c)
+	var writes []mongo.WriteModel
+	for _, d := range doc {
+		if multi {
+			write := mongo.NewUpdateManyModel()
+			write.SetFilter(d[0])
+			ue := ObjToM(d[1])
+			autoUpdateTime(m.DbName, c, ue)
+			write.SetUpdate(ue)
+			write.SetUpsert(upsert)
+			writes = append(writes, write)
+		} else {
+			write := mongo.NewUpdateOneModel()
+			write.SetFilter(d[0])
+			ue := ObjToM(d[1])
+			autoUpdateTime(m.DbName, c, ue)
+			write.SetUpdate(ue)
+			write.SetUpsert(upsert)
+			writes = append(writes, write)
+		}
+	}
+	br, e := coll.BulkWrite(m.Ctx, writes)
+	if e != nil {
+		log.Println("mgo upsert error:", e.Error())
+		return br == nil || br.UpsertedCount == 0
+	}
+	//	else {
+	//		if r.UpsertedCount != int64(len(doc)) {
+	//			log.Println("mgo upsert uncomplete:uc/dc", r.UpsertedCount, len(doc))
+	//		}
+	//		return true
+	//	}
+	return true
+}
+
+// Finds a single document.
+func (m *MongodbSim) FindOne(c string, query interface{}) (*map[string]interface{}, bool) {
+	return m.FindOneByField(c, query, nil)
+}
+
+// Finds a single document with a field projection.
+func (m *MongodbSim) FindOneByField(c string, query interface{}, fields interface{}) (*map[string]interface{}, bool) {
+	defer catch()
+	res, ok := m.Find(c, query, nil, fields, true, -1, -1)
+	if nil != res && len(*res) > 0 {
+		return &((*res)[0]), ok
+	}
+	return nil, ok
+}
+
+// Finds a single document by its hex _id.
+func (m *MongodbSim) FindById(c string, query string, fields interface{}) (*map[string]interface{}, bool) {
+	defer catch()
+	m.Open()
+	defer m.Close()
+	of := options.FindOne()
+	of.SetProjection(ObjToOth(fields))
+	res := make(map[string]interface{})
+	_id, err := primitive.ObjectIDFromHex(query)
+	if err != nil {
+		log.Println("_id error", err)
+		return &res, true
+	}
+	sr := m.C.Database(m.DbName).Collection(c).FindOne(m.Ctx, map[string]interface{}{"_id": _id}, of)
+	if sr.Err() == nil {
+		sr.Decode(&res)
+	}
+	return &res, true
+}
+
+// Low-level query method.
+func (m *MongodbSim) Find(c string, query interface{}, order interface{}, fields interface{}, single bool, start int, limit int) (*[]map[string]interface{}, bool) {
+	defer catch()
+	m.Open()
+	defer m.Close()
+	var res []map[string]interface{}
+	coll := m.C.Database(m.DbName).Collection(c)
+	if single {
+		res = make([]map[string]interface{}, 1)
+		of := options.FindOne()
+		of.SetProjection(ObjToOth(fields))
+		of.SetSort(ObjToM(order))
+		if sr := coll.FindOne(m.Ctx, ObjToM(query), of); sr.Err() == nil {
+			sr.Decode(&res[0])
+		}
+	} else {
+		res = []map[string]interface{}{}
+		of := options.Find()
+		of.SetProjection(ObjToOth(fields))
+		of.SetSort(ObjToM(order))
+		if start > -1 {
+			of.SetSkip(int64(start))
+			of.SetLimit(int64(limit))
+		}
+		cur, err := coll.Find(m.Ctx, ObjToM(query), of)
+		if err == nil && cur.Err() == nil {
+			cur.All(m.Ctx, &res)
+		}
+	}
+	return &res, true
+}
+
+func ObjToOth(query interface{}) *bson.M {
+	return ObjToMQ(query, false)
+}
+func ObjToM(query interface{}) *bson.M {
+	return ObjToMQ(query, true)
+}
+
+// Converts obj (string or M) to a bson.M; used when building queries.
+func ObjToMQ(query interface{}, isQuery bool) *bson.M {
+	data := make(bson.M)
+	defer catch()
+	if s2, ok2 := query.(*map[string]interface{}); ok2 {
+		data = bson.M(*s2)
+	} else if s3, ok3 := query.(*bson.M); ok3 {
+		return s3
+	} else if s3, ok3 := query.(*primitive.M); ok3 {
+		return s3
+	} else if s, ok := query.(string); ok {
+		json.Unmarshal([]byte(strings.Replace(s, "'", "\"", -1)), &data)
+		if ss, oks := data["_id"]; oks && isQuery {
+			switch ss.(type) {
+			case string:
+				data["_id"], _ = primitive.ObjectIDFromHex(ss.(string))
+			case map[string]interface{}:
+				tmp := ss.(map[string]interface{})
+				for k, v := range tmp {
+					tmp[k], _ = primitive.ObjectIDFromHex(v.(string))
+				}
+				data["_id"] = tmp
+			}
+
+		}
+	} else if s1, ok1 := query.(map[string]interface{}); ok1 {
+		data = s1
+	} else if s4, ok4 := query.(bson.M); ok4 {
+		data = s4
+	} else if s4, ok4 := query.(primitive.M); ok4 {
+		data = s4
+	} else {
+		data = nil
+	}
+	return &data
+}
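`ObjToM`/`ObjToMQ` normalize string queries (single quotes allowed) and, when treated as a query, convert a string `_id` into a `primitive.ObjectID`; `ObjToOth` skips that conversion, which suits projections. A short sketch:

```go
package mongodb // sketch

func sketchObjToM() {
	q := ObjToM(`{'_id':'556db9f0c2e8753072b52ee9'}`)
	// (*q)["_id"] is now a primitive.ObjectID, not a string.
	f := ObjToOth(`{"name":1,"_id":0}`)
	// Projection: the _id value is left untouched.
	_, _ = q, f
}
```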
+func intAllDef(num interface{}, defaultNum int) int {
+	if i, ok := num.(int); ok {
+		return int(i)
+	} else if i0, ok0 := num.(int32); ok0 {
+		return int(i0)
+	} else if i1, ok1 := num.(float64); ok1 {
+		return int(i1)
+	} else if i2, ok2 := num.(int64); ok2 {
+		return int(i2)
+	} else if i3, ok3 := num.(float32); ok3 {
+		return int(i3)
+	} else if i4, ok4 := num.(string); ok4 {
+		in, _ := strconv.Atoi(i4)
+		return int(in)
+	} else if i5, ok5 := num.(int16); ok5 {
+		return int(i5)
+	} else if i6, ok6 := num.(int8); ok6 {
+		return int(i6)
+	} else if i7, ok7 := num.(*big.Int); ok7 {
+		in, _ := strconv.Atoi(fmt.Sprint(i7))
+		return int(in)
+	} else if i8, ok8 := num.(*big.Float); ok8 {
+		in, _ := strconv.Atoi(fmt.Sprint(i8))
+		return int(in)
+	} else {
+		return defaultNum
+	}
+}
+
+// Panic recovery / error interception.
+func catch() {
+	if r := recover(); r != nil {
+		log.Println(r)
+		for skip := 0; ; skip++ {
+			_, file, line, ok := runtime.Caller(skip)
+			if !ok {
+				break
+			}
+			go log.Printf("%v,%v\n", file, line)
+		}
+	}
+}
+
+// Converts a bson ObjectID to its hex string.
+func BsonIdToSId(uid interface{}) string {
+	if uid == nil {
+		return ""
+	} else if u, ok := uid.(string); ok {
+		return u
+	} else if u, ok := uid.(primitive.ObjectID); ok {
+		return u.Hex()
+	} else {
+		return ""
+	}
+}
+
+func StringTOBsonId(id string) (bid primitive.ObjectID) {
+	defer catch()
+	if id != "" {
+		bid, _ = primitive.ObjectIDFromHex(id)
+	}
+	return
+}
+
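`BsonIdToSId` and `StringTOBsonId` convert between hex strings and ObjectIDs; a trivial round trip:

```go
package mongodb // sketch

import "log"

func sketchIds() {
	oid := StringTOBsonId("556db9f0c2e8753072b52ee9")
	log.Println(BsonIdToSId(oid)) // prints the same hex string back
}
```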
+func ToObjectIds(ids []string) []primitive.ObjectID {
+	_ids := []primitive.ObjectID{}
+	for _, v := range ids {
+		_id, _ := primitive.ObjectIDFromHex(v)
+		_ids = append(_ids, _id)
+	}
+	return _ids
+}
+
+// Automatically adds an update timestamp (qfw.user only).
+func autoUpdateTime(db, coll string, ue *bson.M) {
+	if db == "qfw" && coll == "user" {
+		set := ObjToM((*ue)["$set"])
+		if *set == nil {
+			set = &bson.M{}
+		}
+		(*set)["auto_updatetime"] = time.Now().Unix()
+		(*ue)["$set"] = set
+	}
+}
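`autoUpdateTime` only fires for the qfw.user collection, injecting `auto_updatetime` into the `$set` document; a sketch of the effect:

```go
package mongodb // sketch

func sketchAutoUpdateTime() {
	ue := ObjToM(map[string]interface{}{"$set": map[string]interface{}{"s_name": "x"}})
	autoUpdateTime("qfw", "user", ue)
	// (*ue)["$set"] now also carries "auto_updatetime" with the current unix time.
}
```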

+ 127 - 0
sword_base/mongodb/mongodb_test.go

@@ -0,0 +1,127 @@
+package mongodb
+
+import (
+	"log"
+	"testing"
+
+	"go.mongodb.org/mongo-driver/bson"
+)
+
+func Test_add(t *testing.T) {
+	m := &MongodbSim{
+		MongodbAddr: "192.168.3.128:27090",
+		Size:        5,
+		DbName:      "wcj",
+		UserName:    "admin",
+		Password:    "123456",
+	}
+	m.InitPool()
+	// log.Println(m.Save("test", map[string]interface{}{
+	// 	"name": "张三",
+	// 	"age":  12,
+	// }))
+	// log.Println(m.SaveByOriID("test", map[string]interface{}{
+	// 	"name": "张三",
+	// 	"age":  25,
+	// }))
+	log.Println(m.SaveBulkInterface("test", []interface{}{
+		map[string]interface{}{
+			"name": "张三1",
+			"age":  1,
+		},
+		map[string]interface{}{
+			"name": "张三2",
+			"age":  2,
+		},
+	}...))
+}
+func Test_find(t *testing.T) {
+	m := &MongodbSim{
+		MongodbAddr: "192.168.3.128:27080",
+		Size:        5,
+		DbName:      "qfw",
+	}
+	m.InitPool()
+	list, _ := m.Find("test", map[string]interface{}{
+		"name": "张三",
+	}, map[string]interface{}{"age": -1}, map[string]interface{}{"age": 1, "name": 1, "_id": 0}, false, -1, -1)
+	log.Println(len(*list))
+}
+func Test_update(t *testing.T) {
+	m := &MongodbSim{
+		MongodbAddr: "192.168.3.128:27080",
+		Size:        5,
+		DbName:      "qfw",
+		//UserName:    "admin",
+		//Password:    "123456",
+	}
+	m.InitPool()
+	log.Println(m.Update("user", map[string]interface{}{
+		"s_phone": "15037870765",
+	}, map[string]interface{}{
+		"$unset": map[string]interface{}{
+			"s_name": "",
+		},
+	}, false, false))
+	return
+	s := [][]map[string]interface{}{
+		[]map[string]interface{}{
+			map[string]interface{}{"name": "李四111"},
+			map[string]interface{}{"$set": map[string]interface{}{"type": 1}},
+		},
+		[]map[string]interface{}{
+			map[string]interface{}{"name": "张三111"},
+			map[string]interface{}{"$set": map[string]interface{}{"type": 2}},
+		},
+	}
+	one := m.UpdateBulk("test", s...)
+	log.Println(one)
+}
+func Test_count(t *testing.T) {
+	m := &MongodbSim{
+		MongodbAddr: "192.168.3.128:27090",
+		Size:        5,
+		DbName:      "wcj",
+		UserName:    "admin",
+		Password:    "123456",
+	}
+	m.InitPool()
+	one := m.Count("test", bson.M{
+		"name": "张三",
+	})
+	log.Println(one)
+}
+func Test_del(t *testing.T) {
+	m := &MongodbSim{
+		MongodbAddr: "192.168.3.128:27090",
+		Size:        5,
+		DbName:      "wcj",
+		UserName:    "admin",
+		Password:    "123456",
+	}
+	m.InitPool()
+	one := m.Del("test", nil)
+	log.Println(one)
+}
+func Test_itor(t *testing.T) {
+	m := &MongodbSim{
+		MongodbAddr: "192.168.3.128:27080",
+		Size:        5,
+		DbName:      "wcj",
+	}
+	m.InitPool()
+	sess := m.GetMgoConn()
+	it := sess.DB("wcj").C("jyopenplatform_user").Find(map[string]interface{}{
+		"i_status": 1,
+		"i_freeze": 1,
+	}).Select(map[string]interface{}{
+		"a_power":          1,
+		"s_m_openid":       1,
+		"o_rulepro":        1,
+		"s_appid":          1,
+		"s_enterprisename": 1,
+	}).Sort("_id").Iter()
+	for user := make(map[string]interface{}); it.Next(&user); {
+		log.Println(user["_id"], user["s_appid"])
+	}
+}

+ 27 - 0
sword_base/olivere/elastic.v1/CONTRIBUTING.md

@@ -0,0 +1,27 @@
+# How to contribute
+
+Elastic is an open-source project and we are looking forward to each
+contribution.
+
+## Your Pull Request
+
+To make it easy to review and understand your changes, please keep the
+following things in mind before submitting your pull request:
+
+* Work on the latest possible state of `olivere/elastic`.
+* Create a branch dedicated to your change.
+* If possible, write a test case which confirms your change.
+* Make sure your changes and your tests work with all recent versions of
+  Elasticsearch. At the moment, we're targeting the current and the previous
+  release, e.g. the 1.4 and the 1.3 branch.
+* Test your changes before creating a pull request (`go test ./...`).
+* Don't mix several features or bug fixes in one pull request.
+* Create a meaningful commit message.
+* Explain your change, e.g. provide a link to the issue you are fixing and
+  probably a link to the Elasticsearch documentation and/or source code.
+* Format your source with `go fmt`.
+
+## Additional Resources
+
+* [GitHub documentation](http://help.github.com/)
+* [GitHub pull request documentation](http://help.github.com/send-pull-requests/)

+ 20 - 0
sword_base/olivere/elastic.v1/LICENSE

@@ -0,0 +1,20 @@
+The MIT License (MIT)
+Copyright © 2012-2015 Oliver Eilhard
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the “Software”), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included
+in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+IN THE SOFTWARE.

+ 388 - 0
sword_base/olivere/elastic.v1/README.md

@@ -0,0 +1,388 @@
+# Elastic
+
+Elastic is an [Elasticsearch](http://www.elasticsearch.org/) client for the
+[Go](http://www.golang.org/) programming language.
+
+[![Build Status](https://travis-ci.org/olivere/elastic.svg?branch=master)](https://travis-ci.org/olivere/elastic)
+[![Godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/olivere/elastic)
+[![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/olivere/elastic/master/LICENSE)
+
+See the [wiki](https://github.com/olivere/elastic/wiki) for additional information about Elastic.
+
+
+## Releases
+
+### Current version
+
+This is the source code of the current version of Elastic (version 2).
+
+### Earlier versions
+
+If you came from an earlier version and found that you cannot update, don't
+worry. Earlier versions are still available. All you need to do is go-get
+them and change your import path. See below for details. Here's what you
+need to do to use Elastic version 1:
+
+```sh
+$ go get gopkg.in/olivere/elastic.v1
+```
+
+Then change your import path:
+
+```go
+import "gopkg.in/olivere/elastic.v1"
+```
+
+
+## Status
+
+We use Elastic in production since 2012. Although Elastic is quite stable
+from our experience, we don't have a stable API yet. The reason for this
+is that Elasticsearch changes quite often and at a fast pace.
+At this moment we focus on features, not on a stable API.
+
+Having said that, there have been no big API changes that required you
+to rewrite your application big time.
+More often than not it's renaming APIs and adding/removing features
+so that we are in sync with the Elasticsearch API.
+
+Elastic supports and has been tested in production with
+the following Elasticsearch versions: 0.90, 1.0, 1.1, 1.2, 1.3, and 1.4.
+
+Elasticsearch has quite a few features. A lot of them are
+not yet implemented in Elastic (see below for details).
+I add features and APIs as required. It's straightforward
+to implement missing pieces. I'm accepting pull requests :-)
+
+Having said that, I hope you find the project useful.
+
+
+## Usage
+
+The first thing you do is to create a Client. The client connects to
+Elasticsearch on http://127.0.0.1:9200 by default.
+
+You typically create one client for your app. Here's a complete example.
+
+```go
+// Create a client
+client, err := elastic.NewClient()
+if err != nil {
+    // Handle error
+}
+
+// Create an index
+_, err = client.CreateIndex("twitter").Do()
+if err != nil {
+    // Handle error
+    panic(err)
+}
+
+// Add a document to the index
+tweet := Tweet{User: "olivere", Message: "Take Five"}
+_, err = client.Index().
+    Index("twitter").
+    Type("tweet").
+    Id("1").
+    BodyJson(tweet).
+    Do()
+if err != nil {
+    // Handle error
+    panic(err)
+}
+
+// Search with a term query
+termQuery := elastic.NewTermQuery("user", "olivere")
+searchResult, err := client.Search().
+    Index("twitter").   // search in index "twitter"
+    Query(&termQuery).  // specify the query
+    Sort("user", true). // sort by "user" field, ascending
+    From(0).Size(10).   // take documents 0-9
+    Debug(true).        // print request and response to stdout
+    Pretty(true).       // pretty print request and response JSON
+    Do()                // execute
+if err != nil {
+    // Handle error
+    panic(err)
+}
+
+// searchResult is of type SearchResult and returns hits, suggestions,
+// and all kinds of other information from Elasticsearch.
+fmt.Printf("Query took %d milliseconds\n", searchResult.TookInMillis)
+
+// Each is a convenience function that iterates over hits in a search result.
+// It makes sure you don't need to check for nil values in the response.
+// However, it ignores errors in serialization. If you want full control
+// over iterating the hits, see below.
+var ttyp Tweet
+for _, item := range searchResult.Each(reflect.TypeOf(ttyp)) {
+    t := item.(Tweet)
+    fmt.Printf("Tweet by %s: %s\n", t.User, t.Message)
+}
+// TotalHits is another convenience function that works even when something goes wrong.
+fmt.Printf("Found a total of %d tweets\n", searchResult.TotalHits())
+
+// Here's how you iterate through results with full control over each step.
+if searchResult.Hits != nil {
+    fmt.Printf("Found a total of %d tweets\n", searchResult.Hits.TotalHits)
+
+    // Iterate through results
+    for _, hit := range searchResult.Hits.Hits {
+        // hit.Index contains the name of the index
+
+        // Deserialize hit.Source into a Tweet (could also be just a map[string]interface{}).
+        var t Tweet
+        err := json.Unmarshal(*hit.Source, &t)
+        if err != nil {
+            // Deserialization failed
+        }
+
+        // Work with tweet
+        fmt.Printf("Tweet by %s: %s\n", t.User, t.Message)
+    }
+} else {
+    // No hits
+    fmt.Print("Found no tweets\n")
+}
+
+// Delete the index again
+_, err = client.DeleteIndex("twitter").Do()
+if err != nil {
+    // Handle error
+    panic(err)
+}
+```
+
+See the [wiki](https://github.com/olivere/elastic/wiki) for more details.
+
+
+## API Status
+
+Here's the current API status.
+
+### APIs
+
+- [x] Search (most queries, filters, facets, aggregations etc. are implemented: see below)
+- [x] Index
+- [x] Get
+- [x] Delete
+- [x] Delete By Query
+- [x] Update
+- [x] Multi Get
+- [x] Bulk
+- [ ] Bulk UDP
+- [ ] Term vectors
+- [ ] Multi term vectors
+- [x] Count
+- [ ] Validate
+- [x] Explain
+- [x] Search
+- [ ] Search shards
+- [x] Search template
+- [x] Facets (most are implemented, see below)
+- [x] Aggregates (most are implemented, see below)
+- [x] Multi Search
+- [x] Percolate
+- [ ] More like this
+- [ ] Benchmark
+
+### Indices
+
+- [x] Create index
+- [x] Delete index
+- [x] Get index
+- [x] Indices exists
+- [x] Open/close index
+- [x] Put mapping
+- [x] Get mapping
+- [ ] Get field mapping
+- [x] Types exist
+- [x] Delete mapping
+- [x] Index aliases
+- [ ] Update indices settings
+- [x] Get settings
+- [ ] Analyze
+- [x] Index templates
+- [ ] Warmers
+- [ ] Status
+- [x] Indices stats
+- [ ] Indices segments
+- [ ] Indices recovery
+- [ ] Clear cache
+- [x] Flush
+- [x] Refresh
+- [x] Optimize
+- [ ] Upgrade
+
+### Snapshot and Restore
+
+- [ ] Snapshot
+- [ ] Restore
+- [ ] Snapshot status
+- [ ] Monitoring snapshot/restore progress
+- [ ] Partial restore
+
+### Cat APIs
+
+Not implemented. Those are better suited for operating with Elasticsearch
+on the command line.
+
+### Cluster
+
+- [x] Health
+- [x] State
+- [ ] Stats
+- [ ] Pending cluster tasks
+- [ ] Cluster reroute
+- [ ] Cluster update settings
+- [ ] Nodes stats
+- [x] Nodes info
+- [ ] Nodes hot_threads
+- [ ] Nodes shutdown
+
+### Query DSL
+
+#### Queries
+
+- [x] `match`
+- [x] `multi_match`
+- [x] `bool`
+- [x] `boosting`
+- [ ] `common_terms`
+- [ ] `constant_score`
+- [x] `dis_max`
+- [x] `filtered`
+- [x] `fuzzy_like_this_query` (`flt`)
+- [x] `fuzzy_like_this_field_query` (`flt_field`)
+- [x] `function_score`
+- [x] `fuzzy`
+- [ ] `geo_shape`
+- [x] `has_child`
+- [x] `has_parent`
+- [x] `ids`
+- [ ] `indices`
+- [x] `match_all`
+- [x] `mlt`
+- [x] `mlt_field`
+- [x] `nested`
+- [x] `prefix`
+- [x] `query_string`
+- [x] `simple_query_string`
+- [x] `range`
+- [x] `regexp`
+- [ ] `span_first`
+- [ ] `span_multi_term`
+- [ ] `span_near`
+- [ ] `span_not`
+- [ ] `span_or`
+- [ ] `span_term`
+- [x] `term`
+- [x] `terms`
+- [ ] `top_children`
+- [x] `wildcard`
+- [ ] `minimum_should_match`
+- [ ] `multi_term_query_rewrite`
+- [x] `template_query`
+
+#### Filters
+
+- [x] `and`
+- [x] `bool`
+- [x] `exists`
+- [ ] `geo_bounding_box`
+- [x] `geo_distance`
+- [ ] `geo_distance_range`
+- [x] `geo_polygon`
+- [ ] `geoshape`
+- [ ] `geohash`
+- [x] `has_child`
+- [x] `has_parent`
+- [x] `ids`
+- [ ] `indices`
+- [x] `limit`
+- [x] `match_all`
+- [x] `missing`
+- [x] `nested`
+- [x] `not`
+- [x] `or`
+- [x] `prefix`
+- [x] `query`
+- [x] `range`
+- [x] `regexp`
+- [ ] `script`
+- [x] `term`
+- [x] `terms`
+- [x] `type`
+
+### Facets
+
+- [x] Terms
+- [x] Range
+- [x] Histogram
+- [x] Date Histogram
+- [x] Filter
+- [x] Query
+- [x] Statistical
+- [x] Terms Stats
+- [x] Geo Distance
+
+### Aggregations
+
+- [x] min
+- [x] max
+- [x] sum
+- [x] avg
+- [x] stats
+- [x] extended stats
+- [x] value count
+- [x] percentiles
+- [x] percentile ranks
+- [x] cardinality
+- [x] geo bounds
+- [x] top hits
+- [ ] scripted metric
+- [x] global
+- [x] filter
+- [x] filters
+- [x] missing
+- [x] nested
+- [x] reverse nested
+- [x] children
+- [x] terms
+- [x] significant terms
+- [x] range
+- [x] date range
+- [x] ipv4 range
+- [x] histogram
+- [x] date histogram
+- [x] geo distance
+- [x] geohash grid
+
+### Sorting
+
+- [x] Sort by score
+- [x] Sort by field
+- [x] Sort by geo distance
+- [x] Sort by script
+
+### Scan
+
+Scrolling through documents (e.g. `search_type=scan`) are implemented via
+the `Scroll` and `Scan` services. The `ClearScroll` API is implemented as well.
+
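+As a rough illustration (not taken from the test suite), a minimal scroll
+round-trip over a hypothetical index `myindex` might look like the following
+fragment, assuming a `client` has already been created:
+
+```go
+// Start scrolling with a page size of 1.
+res, err := client.Scroll("myindex").Size(1).Do()
+if err != nil {
+	// handle error
+}
+scrollId := res.ScrollId
+
+// Fetch the next page by passing the scroll id back in.
+res, err = client.Scroll("myindex").Size(1).ScrollId(scrollId).Do()
+if err != nil {
+	// handle error
+}
+
+// Release the search context once you are done.
+_, _ = client.ClearScroll().ScrollId(scrollId).Do()
+```
+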
+## How to contribute
+
+Read [the contribution guidelines](https://github.com/olivere/elastic/blob/master/CONTRIBUTING.md).
+
+## Credits
+
+Thanks a lot to the great folks working hard on
+[Elasticsearch](http://www.elasticsearch.org/)
+and
+[Go](http://www.golang.org/).
+
+## LICENSE
+
+MIT-LICENSE. See [LICENSE](http://olivere.mit-license.org/)
+or the LICENSE file provided in the repository for details.
+

+ 107 - 0
sword_base/olivere/elastic.v1/alias.go

@@ -0,0 +1,107 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+)
+
+type AliasService struct {
+	client  *Client
+	actions []aliasAction
+	pretty  bool
+}
+
+type aliasAction struct {
+	// "add" or "remove"
+	Type string
+	// Index name
+	Index string
+	// Alias name
+	Alias string
+	// Filter
+	Filter *Filter
+}
+
+func NewAliasService(client *Client) *AliasService {
+	builder := &AliasService{
+		client:  client,
+		actions: make([]aliasAction, 0),
+	}
+	return builder
+}
+
+func (s *AliasService) Pretty(pretty bool) *AliasService {
+	s.pretty = pretty
+	return s
+}
+
+func (s *AliasService) Add(indexName string, aliasName string) *AliasService {
+	action := aliasAction{Type: "add", Index: indexName, Alias: aliasName}
+	s.actions = append(s.actions, action)
+	return s
+}
+
+func (s *AliasService) AddWithFilter(indexName string, aliasName string, filter *Filter) *AliasService {
+	action := aliasAction{Type: "add", Index: indexName, Alias: aliasName, Filter: filter}
+	s.actions = append(s.actions, action)
+	return s
+}
+
+func (s *AliasService) Remove(indexName string, aliasName string) *AliasService {
+	action := aliasAction{Type: "remove", Index: indexName, Alias: aliasName}
+	s.actions = append(s.actions, action)
+	return s
+}
+
+func (s *AliasService) Do() (*AliasResult, error) {
+	// Build url
+	path := "/_aliases"
+
+	// Parameters
+	params := make(url.Values)
+	if s.pretty {
+		params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+	}
+
+	// Actions
+	body := make(map[string]interface{})
+	actionsJson := make([]interface{}, 0)
+
+	for _, action := range s.actions {
+		actionJson := make(map[string]interface{})
+		detailsJson := make(map[string]interface{})
+		detailsJson["index"] = action.Index
+		detailsJson["alias"] = action.Alias
+		if action.Filter != nil {
+			detailsJson["filter"] = (*action.Filter).Source()
+		}
+		actionJson[action.Type] = detailsJson
+		actionsJson = append(actionsJson, actionJson)
+	}
+
+	body["actions"] = actionsJson
+
+	// Get response
+	res, err := s.client.PerformRequest("POST", path, params, body)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return results
+	ret := new(AliasResult)
+	if err := json.Unmarshal(res.Body, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// -- Result of an alias request.
+
+type AliasResult struct {
+	Acknowledged bool `json:"acknowledged"`
+}
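
For orientation, a condensed sketch of driving the `AliasService` (the index
and alias names below are placeholders; the flow mirrors the test that follows):

```go
// Point the alias "tweets" at two existing indices.
ack, err := client.Alias().
	Add("tweets-2015-01", "tweets").
	Add("tweets-2015-02", "tweets").
	Do()
if err != nil {
	// handle error
}
if !ack.Acknowledged {
	// the cluster did not acknowledge the alias change
}

// Later, detach one index from the alias again.
_, err = client.Alias().Remove("tweets-2015-01", "tweets").Do()
```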

+ 123 - 0
sword_base/olivere/elastic.v1/alias_test.go

@@ -0,0 +1,123 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"testing"
+)
+
+const (
+	testAliasName = "elastic-test-alias"
+)
+
+func TestAliasLifecycle(t *testing.T) {
+	var err error
+
+	client := setupTestClientAndCreateIndex(t)
+
+	// Some tweets
+	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+	tweet2 := tweet{User: "sandrae", Message: "Cycling is fun."}
+	tweet3 := tweet{User: "olivere", Message: "Another unrelated topic."}
+
+	// Add tweets to first index
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Add tweets to second index
+	_, err = client.Index().Index(testIndexName2).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Flush
+	_, err = client.Flush().Index(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, err = client.Flush().Index(testIndexName2).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	/*
+		// Alias should not yet exist
+		aliasesResult1, err := client.Aliases().Do()
+		if err != nil {
+			t.Fatal(err)
+		}
+		if len(aliasesResult1.Indices) != 0 {
+			t.Errorf("expected len(AliasesResult.Indices) = %d; got %d", 0, len(aliasesResult1.Indices))
+		}
+	*/
+
+	// Add both indices to a new alias
+	aliasCreate, err := client.Alias().
+		Add(testIndexName, testAliasName).
+		Add(testIndexName2, testAliasName).
+		//Pretty(true).
+		Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !aliasCreate.Acknowledged {
+		t.Errorf("expected AliasResult.Acknowledged %v; got %v", true, aliasCreate.Acknowledged)
+	}
+
+	// Search should return all 3 tweets
+	matchAll := NewMatchAllQuery()
+	searchResult1, err := client.Search().Index(testAliasName).Query(&matchAll).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if searchResult1.Hits == nil {
+		t.Errorf("expected SearchResult.Hits != nil; got nil")
+	}
+	if searchResult1.Hits.TotalHits != 3 {
+		t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 3, searchResult1.Hits.TotalHits)
+	}
+
+	/*
+		// Alias should return both indices
+		aliasesResult2, err := client.Aliases().Do()
+		if err != nil {
+			t.Fatal(err)
+		}
+		if len(aliasesResult2.Indices) != 2 {
+			t.Errorf("expected len(AliasesResult.Indices) = %d; got %d", 2, len(aliasesResult2.Indices))
+		}
+	*/
+
+	// Removing the first index should remove two tweets, so the search should only yield 1
+	aliasRemove1, err := client.Alias().
+		Remove(testIndexName, testAliasName).
+		//Pretty(true).
+		Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !aliasRemove1.Acknowledged {
+		t.Errorf("expected AliasResult.Acknowledged %v; got %v", true, aliasRemove1.Acknowledged)
+	}
+
+	searchResult2, err := client.Search().Index(testAliasName).Query(&matchAll).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if searchResult2.Hits == nil {
+		t.Errorf("expected SearchResult.Hits != nil; got nil")
+	}
+	if searchResult2.Hits.TotalHits != 1 {
+		t.Errorf("expected SearchResult.Hits.TotalHits = %d; got %d", 1, searchResult2.Hits.TotalHits)
+	}
+
+}

+ 160 - 0
sword_base/olivere/elastic.v1/aliases.go

@@ -0,0 +1,160 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+	"strings"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
+type AliasesService struct {
+	client  *Client
+	indices []string
+	pretty  bool
+}
+
+func NewAliasesService(client *Client) *AliasesService {
+	builder := &AliasesService{
+		client:  client,
+		indices: make([]string, 0),
+	}
+	return builder
+}
+
+func (s *AliasesService) Pretty(pretty bool) *AliasesService {
+	s.pretty = pretty
+	return s
+}
+
+func (s *AliasesService) Index(indexName string) *AliasesService {
+	s.indices = append(s.indices, indexName)
+	return s
+}
+
+func (s *AliasesService) Indices(indexNames ...string) *AliasesService {
+	s.indices = append(s.indices, indexNames...)
+	return s
+}
+
+func (s *AliasesService) Do() (*AliasesResult, error) {
+	var err error
+
+	// Build url
+	path := "/"
+
+	// Indices part
+	indexPart := make([]string, 0)
+	for _, index := range s.indices {
+		index, err = uritemplates.Expand("{index}", map[string]string{
+			"index": index,
+		})
+		if err != nil {
+			return nil, err
+		}
+		indexPart = append(indexPart, index)
+	}
+	path += strings.Join(indexPart, ",")
+
+	// TODO Add types here
+
+	// Search
+	path += "/_aliases"
+
+	// Parameters
+	params := make(url.Values)
+	if s.pretty {
+		params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+	}
+
+	// Get response
+	res, err := s.client.PerformRequest("GET", path, params, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// {
+	//   "indexName" : {
+	//     "aliases" : {
+	//       "alias1" : { },
+	//       "alias2" : { }
+	//     }
+	//   },
+	//   "indexName2" : {
+	//     ...
+	//   },
+	// }
+	indexMap := make(map[string]interface{})
+	if err := json.Unmarshal(res.Body, &indexMap); err != nil {
+		return nil, err
+	}
+
+	// Each (indexName, _)
+	ret := &AliasesResult{
+		Indices: make(map[string]indexResult),
+	}
+	for indexName, indexData := range indexMap {
+		indexOut, found := ret.Indices[indexName]
+		if !found {
+			indexOut = indexResult{Aliases: make([]aliasResult, 0)}
+		}
+
+		// { "aliases" : { ... } }
+		indexDataMap, ok := indexData.(map[string]interface{})
+		if ok {
+			aliasesData, ok := indexDataMap["aliases"].(map[string]interface{})
+			if ok {
+				for aliasName := range aliasesData {
+					aliasRes := aliasResult{AliasName: aliasName}
+					indexOut.Aliases = append(indexOut.Aliases, aliasRes)
+				}
+			}
+		}
+
+		ret.Indices[indexName] = indexOut
+	}
+
+	return ret, nil
+}
+
+// -- Result of an alias request.
+
+type AliasesResult struct {
+	Indices map[string]indexResult
+}
+
+type indexResult struct {
+	Aliases []aliasResult
+}
+
+type aliasResult struct {
+	AliasName string
+}
+
+func (ar AliasesResult) IndicesByAlias(aliasName string) []string {
+	indices := make([]string, 0)
+
+	for indexName, indexInfo := range ar.Indices {
+		for _, aliasInfo := range indexInfo.Aliases {
+			if aliasInfo.AliasName == aliasName {
+				indices = append(indices, indexName)
+			}
+		}
+	}
+
+	return indices
+}
+
+func (ir indexResult) HasAlias(aliasName string) bool {
+	for _, alias := range ir.Aliases {
+		if alias.AliasName == aliasName {
+			return true
+		}
+	}
+	return false
+}
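
A similarly condensed sketch of reading aliases back with `AliasesService`
(placeholder names again; the test that follows exercises the full flow):

```go
res, err := client.Aliases().Indices("tweets-2015-01", "tweets-2015-02").Do()
if err != nil {
	// handle error
}

// Resolve the alias back to the indices it currently points to ...
for _, indexName := range res.IndicesByAlias("tweets") {
	fmt.Println(indexName, "is part of the tweets alias")
}

// ... or check a single index directly.
if info, found := res.Indices["tweets-2015-01"]; found && info.HasAlias("tweets") {
	// "tweets-2015-01" carries the "tweets" alias
}
```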

+ 146 - 0
sword_base/olivere/elastic.v1/aliases_test.go

@@ -0,0 +1,146 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"testing"
+)
+
+func TestAliases(t *testing.T) {
+	var err error
+
+	client := setupTestClientAndCreateIndex(t)
+
+	// Some tweets
+	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+	tweet2 := tweet{User: "sandrae", Message: "Cycling is fun."}
+	tweet3 := tweet{User: "olivere", Message: "Another unrelated topic."}
+
+	// Add tweets to first index
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	// Add tweets to second index
+	_, err = client.Index().Index(testIndexName2).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Flush
+	_, err = client.Flush().Index(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, err = client.Flush().Index(testIndexName2).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Alias should not yet exist
+	aliasesResult1, err := client.Aliases().
+		Indices(testIndexName, testIndexName2).
+		//Pretty(true).
+		Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(aliasesResult1.Indices) != 2 {
+		t.Errorf("expected len(AliasesResult.Indices) = %d; got %d", 2, len(aliasesResult1.Indices))
+	}
+	for indexName, indexDetails := range aliasesResult1.Indices {
+		if len(indexDetails.Aliases) != 0 {
+			t.Errorf("expected len(AliasesResult.Indices[%s].Aliases) = %d; got %d", indexName, 0, len(indexDetails.Aliases))
+		}
+	}
+
+	// Add both indices to a new alias
+	aliasCreate, err := client.Alias().
+		Add(testIndexName, testAliasName).
+		Add(testIndexName2, testAliasName).
+		//Pretty(true).
+		Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !aliasCreate.Acknowledged {
+		t.Errorf("expected AliasResult.Acknowledged %v; got %v", true, aliasCreate.Acknowledged)
+	}
+
+	// Alias should now exist
+	aliasesResult2, err := client.Aliases().
+		Indices(testIndexName, testIndexName2).
+		//Pretty(true).
+		Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(aliasesResult2.Indices) != 2 {
+		t.Errorf("expected len(AliasesResult.Indices) = %d; got %d", 2, len(aliasesResult2.Indices))
+	}
+	for indexName, indexDetails := range aliasesResult2.Indices {
+		if len(indexDetails.Aliases) != 1 {
+			t.Errorf("expected len(AliasesResult.Indices[%s].Aliases) = %d; got %d", indexName, 1, len(indexDetails.Aliases))
+		}
+	}
+
+	// Check the reverse function:
+	indexInfo1, found := aliasesResult2.Indices[testIndexName]
+	if !found {
+		t.Errorf("expected info about index %s = %v; got %v", testIndexName, true, found)
+	}
+	aliasFound := indexInfo1.HasAlias(testAliasName)
+	if !aliasFound {
+		t.Errorf("expected alias %s to include index %s; got %v", testAliasName, testIndexName, aliasFound)
+	}
+
+	// Check the reverse function:
+	indexInfo2, found := aliasesResult2.Indices[testIndexName2]
+	if !found {
+		t.Errorf("expected info about index %s = %v; got %v", testIndexName, true, found)
+	}
+	aliasFound = indexInfo2.HasAlias(testAliasName)
+	if !aliasFound {
+		t.Errorf("expected alias %s to include index %s; got %v", testAliasName, testIndexName2, aliasFound)
+	}
+
+	// Remove the first index from the alias again
+	aliasRemove1, err := client.Alias().
+		Remove(testIndexName, testAliasName).
+		//Pretty(true).
+		Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !aliasRemove1.Acknowledged {
+		t.Errorf("expected AliasResult.Acknowledged %v; got %v", true, aliasRemove1.Acknowledged)
+	}
+
+	// Alias should now exist only for index 2
+	aliasesResult3, err := client.Aliases().Indices(testIndexName, testIndexName2).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(aliasesResult3.Indices) != 2 {
+		t.Errorf("expected len(AliasesResult.Indices) = %d; got %d", 2, len(aliasesResult3.Indices))
+	}
+	for indexName, indexDetails := range aliasesResult3.Indices {
+		if indexName == testIndexName {
+			if len(indexDetails.Aliases) != 0 {
+				t.Errorf("expected len(AliasesResult.Indices[%s].Aliases) = %d; got %d", indexName, 0, len(indexDetails.Aliases))
+			}
+		} else if indexName == testIndexName2 {
+			if len(indexDetails.Aliases) != 1 {
+				t.Errorf("expected len(AliasesResult.Indices[%s].Aliases) = %d; got %d", indexName, 1, len(indexDetails.Aliases))
+			}
+		} else {
+			t.Errorf("got index %s", indexName)
+		}
+	}
+}

+ 301 - 0
sword_base/olivere/elastic.v1/bulk.go

@@ -0,0 +1,301 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/url"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
+type BulkService struct {
+	client *Client
+
+	index    string
+	_type    string
+	requests []BulkableRequest
+	//replicationType string
+	//consistencyLevel string
+	timeout string
+	refresh *bool
+	pretty  bool
+}
+
+func NewBulkService(client *Client) *BulkService {
+	builder := &BulkService{
+		client:   client,
+		requests: make([]BulkableRequest, 0),
+	}
+	return builder
+}
+
+func (s *BulkService) reset() {
+	s.requests = make([]BulkableRequest, 0)
+}
+
+func (s *BulkService) Index(index string) *BulkService {
+	s.index = index
+	return s
+}
+
+func (s *BulkService) Type(_type string) *BulkService {
+	s._type = _type
+	return s
+}
+
+func (s *BulkService) Timeout(timeout string) *BulkService {
+	s.timeout = timeout
+	return s
+}
+
+func (s *BulkService) Refresh(refresh bool) *BulkService {
+	s.refresh = &refresh
+	return s
+}
+
+func (s *BulkService) Pretty(pretty bool) *BulkService {
+	s.pretty = pretty
+	return s
+}
+
+func (s *BulkService) Add(r BulkableRequest) *BulkService {
+	s.requests = append(s.requests, r)
+	return s
+}
+
+func (s *BulkService) NumberOfActions() int {
+	return len(s.requests)
+}
+
+func (s *BulkService) bodyAsString() (string, error) {
+	buf := bytes.NewBufferString("")
+
+	for _, req := range s.requests {
+		source, err := req.Source()
+		if err != nil {
+			return "", err
+		}
+		for _, line := range source {
+			_, err := buf.WriteString(fmt.Sprintf("%s\n", line))
+			if err != nil {
+				return "", err
+			}
+		}
+	}
+
+	return buf.String(), nil
+}
+
+func (s *BulkService) Do() (*BulkResponse, error) {
+	// No actions?
+	if s.NumberOfActions() == 0 {
+		return nil, errors.New("elastic: No bulk actions to commit")
+	}
+
+	// Get body
+	body, err := s.bodyAsString()
+	if err != nil {
+		return nil, err
+	}
+
+	// Build url
+	path := "/"
+	if s.index != "" {
+		index, err := uritemplates.Expand("{index}", map[string]string{
+			"index": s.index,
+		})
+		if err != nil {
+			return nil, err
+		}
+		path += index + "/"
+	}
+	if s._type != "" {
+		typ, err := uritemplates.Expand("{type}", map[string]string{
+			"type": s._type,
+		})
+		if err != nil {
+			return nil, err
+		}
+		path += typ + "/"
+	}
+	path += "_bulk"
+
+	// Parameters
+	params := make(url.Values)
+	if s.pretty {
+		params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+	}
+	if s.refresh != nil {
+		params.Set("refresh", fmt.Sprintf("%v", *s.refresh))
+	}
+	if s.timeout != "" {
+		params.Set("timeout", s.timeout)
+	}
+
+	// Get response
+	res, err := s.client.PerformRequest("POST", path, params, body)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return results
+	ret := new(BulkResponse)
+	if err := json.Unmarshal(res.Body, ret); err != nil {
+		return nil, err
+	}
+
+	// Reset so the request can be reused
+	s.reset()
+
+	return ret, nil
+}
+
+// BulkResponse is a response to a bulk execution.
+//
+// Example:
+// {
+//   "took":3,
+//   "errors":false,
+//   "items":[{
+//     "index":{
+//       "_index":"index1",
+//       "_type":"tweet",
+//       "_id":"1",
+//       "_version":3,
+//       "status":201
+//     }
+//   },{
+//     "index":{
+//       "_index":"index2",
+//       "_type":"tweet",
+//       "_id":"2",
+//       "_version":3,
+//       "status":200
+//     }
+//   },{
+//     "delete":{
+//       "_index":"index1",
+//       "_type":"tweet",
+//       "_id":"1",
+//       "_version":4,
+//       "status":200,
+//       "found":true
+//     }
+//   },{
+//     "update":{
+//       "_index":"index2",
+//       "_type":"tweet",
+//       "_id":"2",
+//       "_version":4,
+//       "status":200
+//     }
+//   }]
+// }
+type BulkResponse struct {
+	Took   int                            `json:"took,omitempty"`
+	Errors bool                           `json:"errors,omitempty"`
+	Items  []map[string]*BulkResponseItem `json:"items,omitempty"`
+}
+
+// BulkResponseItem is the result of a single bulk request.
+type BulkResponseItem struct {
+	Index   string `json:"_index,omitempty"`
+	Type    string `json:"_type,omitempty"`
+	Id      string `json:"_id,omitempty"`
+	Version int    `json:"_version,omitempty"`
+	Status  int    `json:"status,omitempty"`
+	Found   bool   `json:"found,omitempty"`
+	Error   string `json:"error,omitempty"`
+}
+
+// Indexed returns all bulk request results of "index" actions.
+func (r *BulkResponse) Indexed() []*BulkResponseItem {
+	return r.ByAction("index")
+}
+
+// Created returns all bulk request results of "create" actions.
+func (r *BulkResponse) Created() []*BulkResponseItem {
+	return r.ByAction("create")
+}
+
+// Updated returns all bulk request results of "update" actions.
+func (r *BulkResponse) Updated() []*BulkResponseItem {
+	return r.ByAction("update")
+}
+
+// Deleted returns all bulk request results of "delete" actions.
+func (r *BulkResponse) Deleted() []*BulkResponseItem {
+	return r.ByAction("delete")
+}
+
+// ByAction returns all bulk request results of a certain action,
+// e.g. "index" or "delete".
+func (r *BulkResponse) ByAction(action string) []*BulkResponseItem {
+	if r.Items == nil {
+		return nil
+	}
+	items := make([]*BulkResponseItem, 0)
+	for _, item := range r.Items {
+		if result, found := item[action]; found {
+			items = append(items, result)
+		}
+	}
+	return items
+}
+
+// ById returns all bulk request results of a given document id,
+// regardless of the action ("index", "delete" etc.).
+func (r *BulkResponse) ById(id string) []*BulkResponseItem {
+	if r.Items == nil {
+		return nil
+	}
+	items := make([]*BulkResponseItem, 0)
+	for _, item := range r.Items {
+		for _, result := range item {
+			if result.Id == id {
+				items = append(items, result)
+			}
+		}
+	}
+	return items
+}
+
+// Failed returns those items of a bulk response that have errors,
+// i.e. those that don't have a status code between 200 and 299.
+func (r *BulkResponse) Failed() []*BulkResponseItem {
+	if r.Items == nil {
+		return nil
+	}
+	errors := make([]*BulkResponseItem, 0)
+	for _, item := range r.Items {
+		for _, result := range item {
+			if !(result.Status >= 200 && result.Status <= 299) {
+				errors = append(errors, result)
+			}
+		}
+	}
+	return errors
+}
+
+// Succeeded returns those items of a bulk response that have no errors,
+// i.e. those that have a status code between 200 and 299.
+func (r *BulkResponse) Succeeded() []*BulkResponseItem {
+	if r.Items == nil {
+		return nil
+	}
+	succeeded := make([]*BulkResponseItem, 0)
+	for _, item := range r.Items {
+		for _, result := range item {
+			if result.Status >= 200 && result.Status <= 299 {
+				succeeded = append(succeeded, result)
+			}
+		}
+	}
+	return succeeded
+}
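
Putting the bulk pieces together, a rough fragment of the typical flow
(placeholder index and ids; `bulk_test.go` below exercises the same steps):
build the individual requests, add them to the service, execute, then use the
response helpers to inspect the outcome.

```go
bulk := client.Bulk().
	Add(NewBulkIndexRequest().Index("tweets").Type("tweet").Id("1").
		Doc(map[string]interface{}{"user": "olivere", "message": "hello"})).
	Add(NewBulkDeleteRequest().Index("tweets").Type("tweet").Id("2"))

res, err := bulk.Do() // Do also resets the service so it can be reused
if err != nil {
	// handle error
}
if res.Errors {
	for _, item := range res.Failed() {
		fmt.Printf("bulk action on %s/%s/%s failed: %s\n",
			item.Index, item.Type, item.Id, item.Error)
	}
}
```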

+ 112 - 0
sword_base/olivere/elastic.v1/bulk_delete_request.go

@@ -0,0 +1,112 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"strings"
+)
+
+// -- Bulk delete request --
+
+// Bulk request to remove a document from Elasticsearch.
+type BulkDeleteRequest struct {
+	BulkableRequest
+	index       string
+	typ         string
+	id          string
+	routing     string
+	refresh     *bool
+	version     int64  // default is MATCH_ANY
+	versionType string // default is "internal"
+}
+
+func NewBulkDeleteRequest() *BulkDeleteRequest {
+	return &BulkDeleteRequest{}
+}
+
+func (r *BulkDeleteRequest) Index(index string) *BulkDeleteRequest {
+	r.index = index
+	return r
+}
+
+func (r *BulkDeleteRequest) Type(typ string) *BulkDeleteRequest {
+	r.typ = typ
+	return r
+}
+
+func (r *BulkDeleteRequest) Id(id string) *BulkDeleteRequest {
+	r.id = id
+	return r
+}
+
+func (r *BulkDeleteRequest) Routing(routing string) *BulkDeleteRequest {
+	r.routing = routing
+	return r
+}
+
+func (r *BulkDeleteRequest) Refresh(refresh bool) *BulkDeleteRequest {
+	r.refresh = &refresh
+	return r
+}
+
+func (r *BulkDeleteRequest) Version(version int64) *BulkDeleteRequest {
+	r.version = version
+	return r
+}
+
+// VersionType can be "internal" (default), "external", "external_gte",
+// "external_gt", or "force".
+func (r *BulkDeleteRequest) VersionType(versionType string) *BulkDeleteRequest {
+	r.versionType = versionType
+	return r
+}
+
+func (r *BulkDeleteRequest) String() string {
+	lines, err := r.Source()
+	if err == nil {
+		return strings.Join(lines, "\n")
+	}
+	return fmt.Sprintf("error: %v", err)
+}
+
+func (r *BulkDeleteRequest) Source() ([]string, error) {
+	lines := make([]string, 1)
+
+	source := make(map[string]interface{})
+	deleteCommand := make(map[string]interface{})
+	if r.index != "" {
+		deleteCommand["_index"] = r.index
+	}
+	if r.typ != "" {
+		deleteCommand["_type"] = r.typ
+	}
+	if r.id != "" {
+		deleteCommand["_id"] = r.id
+	}
+	if r.routing != "" {
+		deleteCommand["_routing"] = r.routing
+	}
+	if r.version > 0 {
+		deleteCommand["_version"] = r.version
+	}
+	if r.versionType != "" {
+		deleteCommand["_version_type"] = r.versionType
+	}
+	if r.refresh != nil {
+		deleteCommand["refresh"] = *r.refresh
+	}
+	source["delete"] = deleteCommand
+
+	body, err := json.Marshal(source)
+	if err != nil {
+		return nil, err
+	}
+
+	lines[0] = string(body)
+
+	return lines, nil
+}

+ 42 - 0
sword_base/olivere/elastic.v1/bulk_delete_request_test.go

@@ -0,0 +1,42 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"testing"
+)
+
+func TestBulkDeleteRequestSerialization(t *testing.T) {
+	tests := []struct {
+		Request  BulkableRequest
+		Expected []string
+	}{
+		// #0
+		{
+			Request: NewBulkDeleteRequest().Index("index1").Type("tweet").Id("1"),
+			Expected: []string{
+				`{"delete":{"_id":"1","_index":"index1","_type":"tweet"}}`,
+			},
+		},
+	}
+
+	for i, test := range tests {
+		lines, err := test.Request.Source()
+		if err != nil {
+			t.Fatalf("case #%d: expected no error, got: %v", i, err)
+		}
+		if lines == nil {
+			t.Fatalf("case #%d: expected lines, got nil", i)
+		}
+		if len(lines) != len(test.Expected) {
+			t.Fatalf("case #%d: expected %d lines, got %d", i, len(test.Expected), len(lines))
+		}
+		for j, line := range lines {
+			if line != test.Expected[j] {
+				t.Errorf("case #%d: expected line #%d to be %s, got: %s", i, j, test.Expected[j], line)
+			}
+		}
+	}
+}

+ 173 - 0
sword_base/olivere/elastic.v1/bulk_index_request.go

@@ -0,0 +1,173 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"strings"
+)
+
+// Bulk request to add a document to Elasticsearch.
+type BulkIndexRequest struct {
+	BulkableRequest
+	index       string
+	typ         string
+	id          string
+	opType      string
+	routing     string
+	parent      string
+	timestamp   string
+	ttl         int64
+	refresh     *bool
+	version     int64  // default is MATCH_ANY
+	versionType string // default is "internal"
+	doc         interface{}
+}
+
+func NewBulkIndexRequest() *BulkIndexRequest {
+	return &BulkIndexRequest{
+		opType: "index",
+	}
+}
+
+func (r *BulkIndexRequest) Index(index string) *BulkIndexRequest {
+	r.index = index
+	return r
+}
+
+func (r *BulkIndexRequest) Type(typ string) *BulkIndexRequest {
+	r.typ = typ
+	return r
+}
+
+func (r *BulkIndexRequest) Id(id string) *BulkIndexRequest {
+	r.id = id
+	return r
+}
+
+func (r *BulkIndexRequest) OpType(opType string) *BulkIndexRequest {
+	r.opType = opType
+	return r
+}
+
+func (r *BulkIndexRequest) Routing(routing string) *BulkIndexRequest {
+	r.routing = routing
+	return r
+}
+
+func (r *BulkIndexRequest) Parent(parent string) *BulkIndexRequest {
+	r.parent = parent
+	return r
+}
+
+func (r *BulkIndexRequest) Timestamp(timestamp string) *BulkIndexRequest {
+	r.timestamp = timestamp
+	return r
+}
+
+func (r *BulkIndexRequest) Ttl(ttl int64) *BulkIndexRequest {
+	r.ttl = ttl
+	return r
+}
+
+func (r *BulkIndexRequest) Refresh(refresh bool) *BulkIndexRequest {
+	r.refresh = &refresh
+	return r
+}
+
+func (r *BulkIndexRequest) Version(version int64) *BulkIndexRequest {
+	r.version = version
+	return r
+}
+
+func (r *BulkIndexRequest) VersionType(versionType string) *BulkIndexRequest {
+	r.versionType = versionType
+	return r
+}
+
+func (r *BulkIndexRequest) Doc(doc interface{}) *BulkIndexRequest {
+	r.doc = doc
+	return r
+}
+
+func (r *BulkIndexRequest) String() string {
+	lines, err := r.Source()
+	if err == nil {
+		return strings.Join(lines, "\n")
+	}
+	return fmt.Sprintf("error: %v", err)
+}
+
+func (r *BulkIndexRequest) Source() ([]string, error) {
+	// { "index" : { "_index" : "test", "_type" : "type1", "_id" : "1" } }
+	// { "field1" : "value1" }
+
+	lines := make([]string, 2)
+
+	// "index" ...
+	command := make(map[string]interface{})
+	indexCommand := make(map[string]interface{})
+	if r.index != "" {
+		indexCommand["_index"] = r.index
+	}
+	if r.typ != "" {
+		indexCommand["_type"] = r.typ
+	}
+	if r.id != "" {
+		indexCommand["_id"] = r.id
+	}
+	if r.routing != "" {
+		indexCommand["_routing"] = r.routing
+	}
+	if r.parent != "" {
+		indexCommand["_parent"] = r.parent
+	}
+	if r.timestamp != "" {
+		indexCommand["_timestamp"] = r.timestamp
+	}
+	if r.ttl > 0 {
+		indexCommand["_ttl"] = r.ttl
+	}
+	if r.version > 0 {
+		indexCommand["_version"] = r.version
+	}
+	if r.versionType != "" {
+		indexCommand["_version_type"] = r.versionType
+	}
+	if r.refresh != nil {
+		indexCommand["refresh"] = *r.refresh
+	}
+	command[r.opType] = indexCommand
+	line, err := json.Marshal(command)
+	if err != nil {
+		return nil, err
+	}
+	lines[0] = string(line)
+
+	// "field1" ...
+	if r.doc != nil {
+		switch t := r.doc.(type) {
+		default:
+			body, err := json.Marshal(r.doc)
+			if err != nil {
+				return nil, err
+			}
+			lines[1] = string(body)
+		case json.RawMessage:
+			lines[1] = string(t)
+		case *json.RawMessage:
+			lines[1] = string(*t)
+		case string:
+			lines[1] = t
+		case *string:
+			lines[1] = *t
+		}
+	} else {
+		lines[1] = "{}"
+	}
+
+	return lines, nil
+}

+ 63 - 0
sword_base/olivere/elastic.v1/bulk_index_request_test.go

@@ -0,0 +1,63 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"testing"
+	"time"
+)
+
+func TestBulkIndexRequestSerialization(t *testing.T) {
+	tests := []struct {
+		Request  BulkableRequest
+		Expected []string
+	}{
+		// #0
+		{
+			Request: NewBulkIndexRequest().Index("index1").Type("tweet").Id("1").
+				Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)}),
+			Expected: []string{
+				`{"index":{"_id":"1","_index":"index1","_type":"tweet"}}`,
+				`{"user":"olivere","message":"","retweets":0,"created":"2014-01-18T23:59:58Z"}`,
+			},
+		},
+		// #1
+		{
+			Request: NewBulkIndexRequest().OpType("create").Index("index1").Type("tweet").Id("1").
+				Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)}),
+			Expected: []string{
+				`{"create":{"_id":"1","_index":"index1","_type":"tweet"}}`,
+				`{"user":"olivere","message":"","retweets":0,"created":"2014-01-18T23:59:58Z"}`,
+			},
+		},
+		// #2
+		{
+			Request: NewBulkIndexRequest().OpType("index").Index("index1").Type("tweet").Id("1").
+				Doc(tweet{User: "olivere", Created: time.Date(2014, 1, 18, 23, 59, 58, 0, time.UTC)}),
+			Expected: []string{
+				`{"index":{"_id":"1","_index":"index1","_type":"tweet"}}`,
+				`{"user":"olivere","message":"","retweets":0,"created":"2014-01-18T23:59:58Z"}`,
+			},
+		},
+	}
+
+	for i, test := range tests {
+		lines, err := test.Request.Source()
+		if err != nil {
+			t.Fatalf("case #%d: expected no error, got: %v", i, err)
+		}
+		if lines == nil {
+			t.Fatalf("case #%d: expected lines, got nil", i)
+		}
+		if len(lines) != len(test.Expected) {
+			t.Fatalf("case #%d: expected %d lines, got %d", i, len(test.Expected), len(lines))
+		}
+		for j, line := range lines {
+			if line != test.Expected[j] {
+				t.Errorf("case #%d: expected line #%d to be %s, got: %s", i, j, test.Expected[j], line)
+			}
+		}
+	}
+}

+ 17 - 0
sword_base/olivere/elastic.v1/bulk_request.go

@@ -0,0 +1,17 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"fmt"
+)
+
+// -- Bulkable request (index/update/delete) --
+
+// Generic interface to bulkable requests.
+type BulkableRequest interface {
+	fmt.Stringer
+	Source() ([]string, error)
+}

+ 370 - 0
sword_base/olivere/elastic.v1/bulk_test.go

@@ -0,0 +1,370 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"testing"
+)
+
+func TestBulk(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+	tweet2 := tweet{User: "sandrae", Message: "Dancing all night long. Yeah."}
+
+	index1Req := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id("1").Doc(tweet1)
+	index2Req := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id("2").Doc(tweet2)
+	delete1Req := NewBulkDeleteRequest().Index(testIndexName).Type("tweet").Id("1")
+
+	bulkRequest := client.Bulk()
+	bulkRequest = bulkRequest.Add(index1Req)
+	bulkRequest = bulkRequest.Add(index2Req)
+	bulkRequest = bulkRequest.Add(delete1Req)
+
+	if bulkRequest.NumberOfActions() != 3 {
+		t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 3, bulkRequest.NumberOfActions())
+	}
+
+	bulkResponse, err := bulkRequest.Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if bulkResponse == nil {
+		t.Errorf("expected bulkResponse to be != nil; got nil")
+	}
+
+	if bulkRequest.NumberOfActions() != 0 {
+		t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 0, bulkRequest.NumberOfActions())
+	}
+
+	// Document with Id="1" should not exist
+	exists, err := client.Exists().Index(testIndexName).Type("tweet").Id("1").Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if exists {
+		t.Errorf("expected exists %v; got %v", false, exists)
+	}
+
+	// Document with Id="2" should exist
+	exists, err = client.Exists().Index(testIndexName).Type("tweet").Id("2").Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !exists {
+		t.Errorf("expected exists %v; got %v", true, exists)
+	}
+
+	// Update
+	updateDoc := struct {
+		Retweets int `json:"retweets"`
+	}{
+		42,
+	}
+	update1Req := NewBulkUpdateRequest().Index(testIndexName).Type("tweet").Id("2").Doc(&updateDoc)
+	bulkRequest = client.Bulk()
+	bulkRequest = bulkRequest.Add(update1Req)
+
+	if bulkRequest.NumberOfActions() != 1 {
+		t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 1, bulkRequest.NumberOfActions())
+	}
+
+	bulkResponse, err = bulkRequest.Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if bulkResponse == nil {
+		t.Errorf("expected bulkResponse to be != nil; got nil")
+	}
+
+	if bulkRequest.NumberOfActions() != 0 {
+		t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 0, bulkRequest.NumberOfActions())
+	}
+
+	// Document with Id="2" should now have a retweets count of 42
+	doc, err := client.Get().Index(testIndexName).Type("tweet").Id("2").Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if doc == nil {
+		t.Fatal("expected doc to be != nil; got nil")
+	}
+	if !doc.Found {
+		t.Fatalf("expected doc to be found; got found = %v", doc.Found)
+	}
+	if doc.Source == nil {
+		t.Fatal("expected doc source to be != nil; got nil")
+	}
+	var updatedTweet tweet
+	err = json.Unmarshal(*doc.Source, &updatedTweet)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if updatedTweet.Retweets != 42 {
+		t.Errorf("expected updated tweet retweets = %v; got %v", 42, updatedTweet.Retweets)
+	}
+}
+
+func TestBulkWithIndexSetOnClient(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+	tweet2 := tweet{User: "sandrae", Message: "Dancing all night long. Yeah."}
+
+	index1Req := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id("1").Doc(tweet1)
+	index2Req := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id("2").Doc(tweet2)
+	delete1Req := NewBulkDeleteRequest().Index(testIndexName).Type("tweet").Id("1")
+
+	bulkRequest := client.Bulk().Index(testIndexName).Type("tweet")
+	bulkRequest = bulkRequest.Add(index1Req)
+	bulkRequest = bulkRequest.Add(index2Req)
+	bulkRequest = bulkRequest.Add(delete1Req)
+
+	if bulkRequest.NumberOfActions() != 3 {
+		t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 3, bulkRequest.NumberOfActions())
+	}
+
+	bulkResponse, err := bulkRequest.Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if bulkResponse == nil {
+		t.Errorf("expected bulkResponse to be != nil; got nil")
+	}
+
+	// Document with Id="1" should not exist
+	exists, err := client.Exists().Index(testIndexName).Type("tweet").Id("1").Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if exists {
+		t.Errorf("expected exists %v; got %v", false, exists)
+	}
+
+	// Document with Id="2" should exist
+	exists, err = client.Exists().Index(testIndexName).Type("tweet").Id("2").Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !exists {
+		t.Errorf("expected exists %v; got %v", true, exists)
+	}
+}
+
+func TestBulkRequestsSerialization(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+	tweet2 := tweet{User: "sandrae", Message: "Dancing all night long. Yeah."}
+
+	index1Req := NewBulkIndexRequest().Index(testIndexName).Type("tweet").Id("1").Doc(tweet1)
+	index2Req := NewBulkIndexRequest().OpType("create").Index(testIndexName).Type("tweet").Id("2").Doc(tweet2)
+	delete1Req := NewBulkDeleteRequest().Index(testIndexName).Type("tweet").Id("1")
+	update2Req := NewBulkUpdateRequest().Index(testIndexName).Type("tweet").Id("2").
+		Doc(struct {
+		Retweets int `json:"retweets"`
+	}{
+		Retweets: 42,
+	})
+
+	bulkRequest := client.Bulk()
+	bulkRequest = bulkRequest.Add(index1Req)
+	bulkRequest = bulkRequest.Add(index2Req)
+	bulkRequest = bulkRequest.Add(delete1Req)
+	bulkRequest = bulkRequest.Add(update2Req)
+
+	if bulkRequest.NumberOfActions() != 4 {
+		t.Errorf("expected bulkRequest.NumberOfActions %d; got %d", 4, bulkRequest.NumberOfActions())
+	}
+
+	expected := `{"index":{"_id":"1","_index":"` + testIndexName + `","_type":"tweet"}}
+{"user":"olivere","message":"Welcome to Golang and Elasticsearch.","retweets":0,"created":"0001-01-01T00:00:00Z"}
+{"create":{"_id":"2","_index":"` + testIndexName + `","_type":"tweet"}}
+{"user":"sandrae","message":"Dancing all night long. Yeah.","retweets":0,"created":"0001-01-01T00:00:00Z"}
+{"delete":{"_id":"1","_index":"` + testIndexName + `","_type":"tweet"}}
+{"update":{"_id":"2","_index":"` + testIndexName + `","_type":"tweet"}}
+{"doc":{"retweets":42}}
+`
+	got, err := bulkRequest.bodyAsString()
+	if err != nil {
+		t.Fatalf("expected no error, got: %v", err)
+	}
+	if got != expected {
+		t.Errorf("expected\n%s\ngot:\n%s", expected, got)
+	}
+
+	// Run the bulk request
+	bulkResponse, err := bulkRequest.Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if bulkResponse == nil {
+		t.Errorf("expected bulkResponse to be != nil; got nil")
+	}
+	if bulkResponse.Took == 0 {
+		t.Errorf("expected took to be > 0; got %d", bulkResponse.Took)
+	}
+	if bulkResponse.Errors {
+		t.Errorf("expected errors to be %v; got %v", false, bulkResponse.Errors)
+	}
+	if len(bulkResponse.Items) != 4 {
+		t.Fatalf("expected 4 result items; got %d", len(bulkResponse.Items))
+	}
+
+	// Indexed actions
+	indexed := bulkResponse.Indexed()
+	if indexed == nil {
+		t.Fatal("expected indexed to be != nil; got nil")
+	}
+	if len(indexed) != 1 {
+		t.Fatalf("expected len(indexed) == %d; got %d", 1, len(indexed))
+	}
+	if indexed[0].Id != "1" {
+		t.Errorf("expected indexed[0].Id == %s; got %s", "1", indexed[0].Id)
+	}
+	if indexed[0].Status != 201 {
+		t.Errorf("expected indexed[0].Status == %d; got %d", 201, indexed[0].Status)
+	}
+
+	// Created actions
+	created := bulkResponse.Created()
+	if created == nil {
+		t.Fatal("expected created to be != nil; got nil")
+	}
+	if len(created) != 1 {
+		t.Fatalf("expected len(created) == %d; got %d", 1, len(created))
+	}
+	if created[0].Id != "2" {
+		t.Errorf("expected created[0].Id == %s; got %s", "2", created[0].Id)
+	}
+	if created[0].Status != 201 {
+		t.Errorf("expected created[0].Status == %d; got %d", 201, created[0].Status)
+	}
+
+	// Deleted actions
+	deleted := bulkResponse.Deleted()
+	if deleted == nil {
+		t.Fatal("expected deleted to be != nil; got nil")
+	}
+	if len(deleted) != 1 {
+		t.Fatalf("expected len(deleted) == %d; got %d", 1, len(deleted))
+	}
+	if deleted[0].Id != "1" {
+		t.Errorf("expected deleted[0].Id == %s; got %s", "1", deleted[0].Id)
+	}
+	if deleted[0].Status != 200 {
+		t.Errorf("expected deleted[0].Status == %d; got %d", 200, deleted[0].Status)
+	}
+	if !deleted[0].Found {
+		t.Errorf("expected deleted[0].Found == %v; got %v", true, deleted[0].Found)
+	}
+
+	// Updated actions
+	updated := bulkResponse.Updated()
+	if updated == nil {
+		t.Fatal("expected updated to be != nil; got nil")
+	}
+	if len(updated) != 1 {
+		t.Fatalf("expected len(updated) == %d; got %d", 1, len(updated))
+	}
+	if updated[0].Id != "2" {
+		t.Errorf("expected updated[0].Id == %s; got %s", "2", updated[0].Id)
+	}
+	if updated[0].Status != 200 {
+		t.Errorf("expected updated[0].Status == %d; got %d", 200, updated[0].Status)
+	}
+	if updated[0].Version != 2 {
+		t.Errorf("expected updated[0].Version == %d; got %d", 2, updated[0].Version)
+	}
+
+	// Succeeded actions
+	succeeded := bulkResponse.Succeeded()
+	if succeeded == nil {
+		t.Fatal("expected succeeded to be != nil; got nil")
+	}
+	if len(succeeded) != 4 {
+		t.Fatalf("expected len(succeeded) == %d; got %d", 4, len(succeeded))
+	}
+
+	// ById
+	id1Results := bulkResponse.ById("1")
+	if id1Results == nil {
+		t.Fatal("expected id1Results to be != nil; got nil")
+	}
+	if len(id1Results) != 2 {
+		t.Fatalf("expected len(id1Results) == %d; got %d", 2, len(id1Results))
+	}
+	if id1Results[0].Id != "1" {
+		t.Errorf("expected id1Results[0].Id == %s; got %s", "1", id1Results[0].Id)
+	}
+	if id1Results[0].Status != 201 {
+		t.Errorf("expected id1Results[0].Status == %d; got %d", 201, id1Results[0].Status)
+	}
+	if id1Results[0].Version != 1 {
+		t.Errorf("expected id1Results[0].Version == %d; got %d", 1, id1Results[0].Version)
+	}
+	if id1Results[1].Id != "1" {
+		t.Errorf("expected id1Results[1].Id == %s; got %s", "1", id1Results[1].Id)
+	}
+	if id1Results[1].Status != 200 {
+		t.Errorf("expected id1Results[1].Status == %d; got %d", 200, id1Results[1].Status)
+	}
+	if id1Results[1].Version != 2 {
+		t.Errorf("expected id1Results[1].Version == %d; got %d", 2, id1Results[1].Version)
+	}
+}
+
+func TestFailedBulkRequests(t *testing.T) {
+	js := `{
+  "took" : 2,
+  "errors" : true,
+  "items" : [ {
+    "index" : {
+      "_index" : "elastic-test",
+      "_type" : "tweet",
+      "_id" : "1",
+      "_version" : 1,
+      "status" : 201
+    }
+  }, {
+    "create" : {
+      "_index" : "elastic-test",
+      "_type" : "tweet",
+      "_id" : "2",
+      "_version" : 1,
+      "status" : 423,
+      "error" : "Locked"
+    }
+  }, {
+    "delete" : {
+      "_index" : "elastic-test",
+      "_type" : "tweet",
+      "_id" : "1",
+      "_version" : 2,
+      "status" : 404,
+      "found" : false
+    }
+  }, {
+    "update" : {
+      "_index" : "elastic-test",
+      "_type" : "tweet",
+      "_id" : "2",
+      "_version" : 2,
+      "status" : 200
+    }
+  } ]
+}`
+
+	var resp BulkResponse
+	err := json.Unmarshal([]byte(js), &resp)
+	if err != nil {
+		t.Fatal(err)
+	}
+	failed := resp.Failed()
+	if len(failed) != 2 {
+		t.Errorf("expected %d failed items; got: %d", 2, len(failed))
+	}
+}

+ 244 - 0
sword_base/olivere/elastic.v1/bulk_update_request.go

@@ -0,0 +1,244 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"strings"
+)
+
+// Bulk request to update a document in Elasticsearch.
+type BulkUpdateRequest struct {
+	BulkableRequest
+	index string
+	typ   string
+	id    string
+
+	routing         string
+	parent          string
+	script          string
+	scriptType      string
+	scriptLang      string
+	scriptParams    map[string]interface{}
+	version         int64  // default is MATCH_ANY
+	versionType     string // default is "internal"
+	retryOnConflict *int
+	refresh         *bool
+	upsert          interface{}
+	docAsUpsert     *bool
+	doc             interface{}
+	ttl             int64
+	timestamp       string
+}
+
+func NewBulkUpdateRequest() *BulkUpdateRequest {
+	return &BulkUpdateRequest{}
+}
+
+func (r *BulkUpdateRequest) Index(index string) *BulkUpdateRequest {
+	r.index = index
+	return r
+}
+
+func (r *BulkUpdateRequest) Type(typ string) *BulkUpdateRequest {
+	r.typ = typ
+	return r
+}
+
+func (r *BulkUpdateRequest) Id(id string) *BulkUpdateRequest {
+	r.id = id
+	return r
+}
+
+func (r *BulkUpdateRequest) Routing(routing string) *BulkUpdateRequest {
+	r.routing = routing
+	return r
+}
+
+func (r *BulkUpdateRequest) Parent(parent string) *BulkUpdateRequest {
+	r.parent = parent
+	return r
+}
+
+func (r *BulkUpdateRequest) Script(script string) *BulkUpdateRequest {
+	r.script = script
+	return r
+}
+
+func (r *BulkUpdateRequest) ScriptType(scriptType string) *BulkUpdateRequest {
+	r.scriptType = scriptType
+	return r
+}
+
+func (r *BulkUpdateRequest) ScriptLang(scriptLang string) *BulkUpdateRequest {
+	r.scriptLang = scriptLang
+	return r
+}
+
+func (r *BulkUpdateRequest) ScriptParams(params map[string]interface{}) *BulkUpdateRequest {
+	r.scriptParams = params
+	return r
+}
+
+func (r *BulkUpdateRequest) RetryOnConflict(retryOnConflict int) *BulkUpdateRequest {
+	r.retryOnConflict = &retryOnConflict
+	return r
+}
+
+func (r *BulkUpdateRequest) Version(version int64) *BulkUpdateRequest {
+	r.version = version
+	return r
+}
+
+// VersionType can be "internal" (default), "external", "external_gte",
+// "external_gt", or "force".
+func (r *BulkUpdateRequest) VersionType(versionType string) *BulkUpdateRequest {
+	r.versionType = versionType
+	return r
+}
+
+func (r *BulkUpdateRequest) Refresh(refresh bool) *BulkUpdateRequest {
+	r.refresh = &refresh
+	return r
+}
+
+func (r *BulkUpdateRequest) Doc(doc interface{}) *BulkUpdateRequest {
+	r.doc = doc
+	return r
+}
+
+func (r *BulkUpdateRequest) DocAsUpsert(docAsUpsert bool) *BulkUpdateRequest {
+	r.docAsUpsert = &docAsUpsert
+	return r
+}
+
+func (r *BulkUpdateRequest) Upsert(doc interface{}) *BulkUpdateRequest {
+	r.upsert = doc
+	return r
+}
+
+func (r *BulkUpdateRequest) Ttl(ttl int64) *BulkUpdateRequest {
+	r.ttl = ttl
+	return r
+}
+
+func (r *BulkUpdateRequest) Timestamp(timestamp string) *BulkUpdateRequest {
+	r.timestamp = timestamp
+	return r
+}
+
+func (r *BulkUpdateRequest) String() string {
+	lines, err := r.Source()
+	if err == nil {
+		return strings.Join(lines, "\n")
+	}
+	return fmt.Sprintf("error: %v", err)
+}
+
+func (r *BulkUpdateRequest) getSourceAsString(data interface{}) (string, error) {
+	switch t := data.(type) {
+	default:
+		body, err := json.Marshal(data)
+		if err != nil {
+			return "", err
+		}
+		return string(body), nil
+	case json.RawMessage:
+		return string(t), nil
+	case *json.RawMessage:
+		return string(*t), nil
+	case string:
+		return t, nil
+	case *string:
+		return *t, nil
+	}
+}
+
+func (r BulkUpdateRequest) Source() ([]string, error) {
+	// { "update" : { "_index" : "test", "_type" : "type1", "_id" : "1", ... } }
+	// { "doc" : { "field1" : "value1", ... } }
+	// or
+	// { "update" : { "_index" : "test", "_type" : "type1", "_id" : "1", ... } }
+	// { "script" : { ... } }
+
+	lines := make([]string, 2)
+
+	// "update" ...
+	command := make(map[string]interface{})
+	updateCommand := make(map[string]interface{})
+	if r.index != "" {
+		updateCommand["_index"] = r.index
+	}
+	if r.typ != "" {
+		updateCommand["_type"] = r.typ
+	}
+	if r.id != "" {
+		updateCommand["_id"] = r.id
+	}
+	if r.routing != "" {
+		updateCommand["_routing"] = r.routing
+	}
+	if r.parent != "" {
+		updateCommand["_parent"] = r.parent
+	}
+	if r.timestamp != "" {
+		updateCommand["_timestamp"] = r.timestamp
+	}
+	if r.ttl > 0 {
+		updateCommand["_ttl"] = r.ttl
+	}
+	if r.version > 0 {
+		updateCommand["_version"] = r.version
+	}
+	if r.versionType != "" {
+		updateCommand["_version_type"] = r.versionType
+	}
+	if r.refresh != nil {
+		updateCommand["refresh"] = *r.refresh
+	}
+	if r.retryOnConflict != nil {
+		updateCommand["_retry_on_conflict"] = *r.retryOnConflict
+	}
+	if r.upsert != nil {
+		updateCommand["upsert"] = r.upsert
+	}
+	command["update"] = updateCommand
+	line, err := json.Marshal(command)
+	if err != nil {
+		return nil, err
+	}
+	lines[0] = string(line)
+
+	// 2nd line: {"doc" : { ... }} or {"script": {...}}
+	source := make(map[string]interface{})
+	if r.docAsUpsert != nil {
+		source["doc_as_upsert"] = *r.docAsUpsert
+	}
+	if r.doc != nil {
+		// {"doc":{...}}
+		source["doc"] = r.doc
+	} else if r.script != "" {
+		// {"script":...}
+		source["script"] = r.script
+		if r.scriptLang != "" {
+			source["lang"] = r.scriptLang
+		}
+		/*
+			if r.scriptType != "" {
+				source["script_type"] = r.scriptType
+			}
+		*/
+		if len(r.scriptParams) > 0 {
+			source["params"] = r.scriptParams
+		}
+	}
+	lines[1], err = r.getSourceAsString(source)
+	if err != nil {
+		return nil, err
+	}
+
+	return lines, nil
+}

+ 79 - 0
sword_base/olivere/elastic.v1/bulk_update_request_test.go

@@ -0,0 +1,79 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"testing"
+)
+
+func TestBulkUpdateRequestSerialization(t *testing.T) {
+	tests := []struct {
+		Request  BulkableRequest
+		Expected []string
+	}{
+		// #0
+		{
+			Request: NewBulkUpdateRequest().Index("index1").Type("tweet").Id("1").Doc(struct {
+				Counter int64 `json:"counter"`
+			}{
+				Counter: 42,
+			}),
+			Expected: []string{
+				`{"update":{"_id":"1","_index":"index1","_type":"tweet"}}`,
+				`{"doc":{"counter":42}}`,
+			},
+		},
+		// #1
+		{
+			Request: NewBulkUpdateRequest().Index("index1").Type("tweet").Id("1").
+				RetryOnConflict(3).
+				DocAsUpsert(true).
+				Doc(struct {
+				Counter int64 `json:"counter"`
+			}{
+				Counter: 42,
+			}),
+			Expected: []string{
+				`{"update":{"_id":"1","_index":"index1","_retry_on_conflict":3,"_type":"tweet"}}`,
+				`{"doc":{"counter":42},"doc_as_upsert":true}`,
+			},
+		},
+		// #2
+		{
+			Request: NewBulkUpdateRequest().Index("index1").Type("tweet").Id("1").
+				RetryOnConflict(3).
+				Script(`ctx._source.retweets += param1`).
+				ScriptLang("js").
+				ScriptParams(map[string]interface{}{"param1": 42}).
+				Upsert(struct {
+				Counter int64 `json:"counter"`
+			}{
+				Counter: 42,
+			}),
+			Expected: []string{
+				`{"update":{"_id":"1","_index":"index1","_retry_on_conflict":3,"_type":"tweet","upsert":{"counter":42}}}`,
+				`{"lang":"js","params":{"param1":42},"script":"ctx._source.retweets += param1"}`,
+			},
+		},
+	}
+
+	for i, test := range tests {
+		lines, err := test.Request.Source()
+		if err != nil {
+			t.Fatalf("case #%d: expected no error, got: %v", i, err)
+		}
+		if lines == nil {
+			t.Fatalf("case #%d: expected lines, got nil", i)
+		}
+		if len(lines) != len(test.Expected) {
+			t.Fatalf("case #%d: expected %d lines, got %d", i, len(test.Expected), len(lines))
+		}
+		for j, line := range lines {
+			if line != test.Expected[j] {
+				t.Errorf("case #%d: expected line #%d to be %s, got: %s", i, j, test.Expected[j], line)
+			}
+		}
+	}
+}

+ 28 - 0
sword_base/olivere/elastic.v1/canonicalize.go

@@ -0,0 +1,28 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import "net/url"
+
+// canonicalize takes a list of URLs and returns their canonicalized form, i.e.
+// it removes anything but scheme, userinfo, host, and port, as well as any
+// trailing slash. Invalid URLs and URLs that do not use the http or https
+// protocol are skipped.
+//
+// Example:
+// http://127.0.0.1:9200/path?query=1 -> http://127.0.0.1:9200
+func canonicalize(rawurls ...string) []string {
+	canonicalized := make([]string, 0)
+	for _, rawurl := range rawurls {
+		u, err := url.Parse(rawurl)
+		if err == nil && (u.Scheme == "http" || u.Scheme == "https") {
+			u.Fragment = ""
+			u.Path = ""
+			u.RawQuery = ""
+			canonicalized = append(canonicalized, u.String())
+		}
+	}
+	return canonicalized
+}

+ 41 - 0
sword_base/olivere/elastic.v1/canonicalize_test.go

@@ -0,0 +1,41 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"reflect"
+	"testing"
+)
+
+func TestCanonicalize(t *testing.T) {
+	tests := []struct {
+		Input  []string
+		Output []string
+	}{
+		{
+			Input:  []string{"http://127.0.0.1/"},
+			Output: []string{"http://127.0.0.1"},
+		},
+		{
+			Input:  []string{"http://127.0.0.1:9200/", "gopher://golang.org/", "http://127.0.0.1:9201"},
+			Output: []string{"http://127.0.0.1:9200", "http://127.0.0.1:9201"},
+		},
+		{
+			Input:  []string{"http://user:secret@127.0.0.1/path?query=1#fragment"},
+			Output: []string{"http://user:secret@127.0.0.1"},
+		},
+		{
+			Input:  []string{"https://somewhere.on.mars:9999/path?query=1#fragment"},
+			Output: []string{"https://somewhere.on.mars:9999"},
+		},
+	}
+
+	for _, test := range tests {
+		got := canonicalize(test.Input...)
+		if !reflect.DeepEqual(got, test.Output) {
+			t.Errorf("expected %v; got: %v", test.Output, got)
+		}
+	}
+}

+ 96 - 0
sword_base/olivere/elastic.v1/clear_scroll.go

@@ -0,0 +1,96 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"log"
+	"net/url"
+	"strings"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
+var (
+	_ = fmt.Print
+	_ = log.Print
+	_ = strings.Index
+	_ = uritemplates.Expand
+	_ = url.Parse
+)
+
+// ClearScrollService is documented at http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/search-request-scroll.html.
+type ClearScrollService struct {
+	client     *Client
+	pretty     bool
+	scrollId   []string
+	bodyJson   interface{}
+	bodyString string
+}
+
+// NewClearScrollService creates a new ClearScrollService.
+func NewClearScrollService(client *Client) *ClearScrollService {
+	return &ClearScrollService{
+		client:   client,
+		scrollId: make([]string, 0),
+	}
+}
+
+// ScrollId is a list of scroll IDs to clear.
+// Use _all to clear all search contexts.
+func (s *ClearScrollService) ScrollId(scrollId ...string) *ClearScrollService {
+	s.scrollId = make([]string, 0)
+	s.scrollId = append(s.scrollId, scrollId...)
+	return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *ClearScrollService) buildURL() (string, url.Values, error) {
+	path, err := uritemplates.Expand("/_search/scroll", map[string]string{})
+	if err != nil {
+		return "", url.Values{}, err
+	}
+	return path, url.Values{}, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *ClearScrollService) Validate() error {
+	return nil
+}
+
+// Do executes the operation.
+func (s *ClearScrollService) Do() (*ClearScrollResponse, error) {
+	// Check pre-conditions
+	if err := s.Validate(); err != nil {
+		return nil, err
+	}
+
+	// Get URL for request
+	path, params, err := s.buildURL()
+	if err != nil {
+		return nil, err
+	}
+
+	// Setup HTTP request body
+	body := strings.Join(s.scrollId, ",")
+
+	// Get HTTP response
+	res, err := s.client.PerformRequest("DELETE", path, params, body)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return operation response
+	ret := new(ClearScrollResponse)
+	if err := json.Unmarshal(res.Body, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// ClearScrollResponse is the response of ClearScrollService.Do.
+type ClearScrollResponse struct {
+}
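
As the `ScrollId` comment above notes, `_all` clears every open search
context; a minimal fragment (assuming an existing `client`):

```go
_, err := client.ClearScroll().ScrollId("_all").Do()
if err != nil {
	// handle error
}
```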

+ 72 - 0
sword_base/olivere/elastic.v1/clear_scroll_test.go

@@ -0,0 +1,72 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	_ "net/http"
+	"testing"
+)
+
+func TestClearScroll(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+	tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+	tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
+
+	// Add all documents
+	_, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Flush().Index(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Match all should return all documents
+	res, err := client.Scroll(testIndexName).Size(1).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if res == nil {
+		t.Errorf("expected results != nil; got nil")
+	}
+	if res.ScrollId == "" {
+		t.Errorf("expected scrollId in results; got %q", res.ScrollId)
+	}
+
+	// Search should succeed
+	_, err = client.Scroll(testIndexName).Size(1).ScrollId(res.ScrollId).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Clear scroll id
+	clearScrollRes, err := client.ClearScroll().ScrollId(res.ScrollId).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if clearScrollRes == nil {
+		t.Error("expected results != nil; got nil")
+	}
+
+	// Search result should fail
+	_, err = client.Scroll(testIndexName).Size(1).ScrollId(res.ScrollId).Do()
+	if err == nil {
+		t.Fatalf("expected scroll to fail")
+	}
+}

+ 1240 - 0
sword_base/olivere/elastic.v1/client.go

@@ -0,0 +1,1240 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"log"
+	"math/rand"
+	"net/http"
+	"net/http/httputil"
+	"net/url"
+	"regexp"
+	"strings"
+	"sync"
+	"time"
+)
+
+const (
+	// Version is the current version of Elastic.
+	Version = "2.0.0"
+
+	// DefaultURL is the default endpoint of Elasticsearch on the local machine.
+	// It is used e.g. when initializing a new Client without a specific URL.
+	DefaultURL = "http://127.0.0.1:9200"
+
+	// DefaultScheme is the default protocol scheme to use when sniffing
+	// the Elasticsearch cluster.
+	DefaultScheme = "http"
+
+	// DefaultHealthcheckEnabled specifies if healthchecks are enabled by default.
+	DefaultHealthcheckEnabled = true
+
+	// DefaultHealthcheckTimeoutStartup is the time the healthcheck waits
+	// for a response from Elasticsearch on startup, i.e. when creating a
+	// client. After the client is started, a shorter timeout is commonly used
+	// (its default is specified in DefaultHealthcheckTimeout).
+	DefaultHealthcheckTimeoutStartup = 5 * time.Second
+
+	// DefaultHealthcheckTimeout specifies the time a running client waits for
+	// a response from Elasticsearch. Notice that the healthcheck timeout
+	// when a client is created is larger by default (see DefaultHealthcheckTimeoutStartup).
+	DefaultHealthcheckTimeout = 1 * time.Second
+
+	// DefaultHealthcheckInterval is the default interval between
+	// two health checks of the nodes in the cluster.
+	DefaultHealthcheckInterval = 60 * time.Second
+
+	// DefaultSnifferEnabled specifies if the sniffer is enabled by default.
+	DefaultSnifferEnabled = true
+
+	// DefaultSnifferInterval is the interval between two sniffing procedures,
+	// i.e. the lookup of all nodes in the cluster and their addition/removal
+	// from the list of actual connections.
+	DefaultSnifferInterval = 15 * time.Minute
+
+	// DefaultSnifferTimeoutStartup is the default timeout for the sniffing
+	// process that is initiated while creating a new client. For subsequent
+	// sniffing processes, DefaultSnifferTimeout is used (by default).
+	DefaultSnifferTimeoutStartup = 5 * time.Second
+
+	// DefaultSnifferTimeout is the default timeout after which the
+	// sniffing process times out. Notice that for the initial sniffing
+	// process, DefaultSnifferTimeoutStartup is used.
+	DefaultSnifferTimeout = 2 * time.Second
+
+	// DefaultMaxRetries is the number of retries for a single request after
+	// which Elastic will give up and return an error. It is zero by default, so
+	// retry is disabled by default.
+	DefaultMaxRetries = 0
+)
+
+var (
+	// ErrNoClient is raised when no Elasticsearch node is available.
+	ErrNoClient = errors.New("no Elasticsearch node available")
+
+	// ErrRetry is raised when a request cannot be executed after the configured
+	// number of retries.
+	ErrRetry = errors.New("cannot connect after several retries")
+)
+
+// ClientOptionFunc is a function that configures a Client.
+// It is used in NewClient.
+type ClientOptionFunc func(*Client) error
+
+// Client is an Elasticsearch client. Create one by calling NewClient.
+type Client struct {
+	c *http.Client // net/http Client to use for requests
+
+	connsMu sync.RWMutex // connsMu guards the next block
+	conns   []*conn      // all connections
+	cindex  int          // index into conns
+
+	mu                        sync.RWMutex  // guards the next block
+	urls                      []string      // set of URLs passed initially to the client
+	running                   bool          // true if the client's background processes are running
+	errorlog                  *log.Logger   // error log for critical messages
+	infolog                   *log.Logger   // information log for e.g. response times
+	tracelog                  *log.Logger   // trace log for debugging
+	maxRetries                int           // max. number of retries
+	scheme                    string        // http or https
+	healthcheckEnabled        bool          // healthchecks enabled or disabled
+	healthcheckTimeoutStartup time.Duration // time the healthcheck waits for a response from Elasticsearch on startup
+	healthcheckTimeout        time.Duration // time the healthcheck waits for a response from Elasticsearch
+	healthcheckInterval       time.Duration // interval between healthchecks
+	healthcheckStop           chan bool     // notify healthchecker to stop, and notify back
+	snifferEnabled            bool          // sniffer enabled or disabled
+	snifferTimeoutStartup     time.Duration // time the sniffer waits for a response from nodes info API on startup
+	snifferTimeout            time.Duration // time the sniffer waits for a response from nodes info API
+	snifferInterval           time.Duration // interval between sniffing
+	snifferStop               chan bool     // notify sniffer to stop, and notify back
+	decoder                   Decoder       // used to decode data sent from Elasticsearch
+}
+
+// NewClient creates a new client to work with Elasticsearch.
+//
+// The caller can configure the new client by passing configuration options
+// to the func.
+//
+// Example:
+//
+//   client, err := elastic.NewClient(
+//     elastic.SetURL("http://localhost:9200", "http://localhost:9201"),
+//     elastic.SetMaxRetries(10))
+//
+// If no URL is configured, Elastic uses DefaultURL by default.
+//
+// If the sniffer is enabled (the default), the new client then sniffs
+// the cluster via the Nodes Info API
+// (see http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/cluster-nodes-info.html#cluster-nodes-info).
+// It uses the URLs specified by the caller. The caller is responsible
+// for passing only URLs of nodes that belong to the same cluster.
+// This sniffing process is run on startup and periodically.
+// Use SetSnifferInterval to set the interval between two sniffs (the default is
+// 15 minutes). In other words: By default, the client will find new nodes
+// in the cluster and remove those that are no longer available every
+// 15 minutes. Disable the sniffer by passing SetSniff(false) to NewClient.
+//
+// The list of nodes found in the sniffing process will be used to make
+// connections to the REST API of Elasticsearch. These nodes are also
+// periodically checked in a shorter time frame. This process is called
+// a health check. By default, a health check is done every 60 seconds.
+// You can set a shorter or longer interval with SetHealthcheckInterval.
+// Disabling health checks is not recommended, but can be done by
+// SetHealthcheck(false).
+//
+// Connections are automatically marked as dead or healthy while
+// making requests to Elasticsearch. When a request fails, Elastic will
+// retry up to a maximum number of retries configured with SetMaxRetries.
+// Retries are disabled by default.
+//
+// If no HttpClient is configured, then http.DefaultClient is used.
+// You can use your own http.Client with some http.Transport for
+// advanced scenarios.
+//
+// An error is also returned when some configuration option is invalid or
+// the new client cannot sniff the cluster (if enabled).
+func NewClient(options ...ClientOptionFunc) (*Client, error) {
+	// Set up the client
+	c := &Client{
+		c:                         http.DefaultClient,
+		conns:                     make([]*conn, 0),
+		cindex:                    -1,
+		scheme:                    DefaultScheme,
+		decoder:                   &DefaultDecoder{},
+		maxRetries:                DefaultMaxRetries,
+		healthcheckEnabled:        DefaultHealthcheckEnabled,
+		healthcheckTimeoutStartup: DefaultHealthcheckTimeoutStartup,
+		healthcheckTimeout:        DefaultHealthcheckTimeout,
+		healthcheckInterval:       DefaultHealthcheckInterval,
+		healthcheckStop:           make(chan bool),
+		snifferEnabled:            DefaultSnifferEnabled,
+		snifferTimeoutStartup:     DefaultSnifferTimeoutStartup,
+		snifferTimeout:            DefaultSnifferTimeout,
+		snifferInterval:           DefaultSnifferInterval,
+		snifferStop:               make(chan bool),
+	}
+
+	// Run the options on it
+	for _, option := range options {
+		if err := option(c); err != nil {
+			return nil, err
+		}
+	}
+
+	if len(c.urls) == 0 {
+		c.urls = []string{DefaultURL}
+	}
+	c.urls = canonicalize(c.urls...)
+
+	if c.snifferEnabled {
+		// Sniff the cluster initially
+		if err := c.sniff(c.snifferTimeoutStartup); err != nil {
+			return nil, err
+		}
+	} else {
+		// Do not sniff the cluster initially. Use the provided URLs instead.
+		for _, url := range c.urls {
+			c.conns = append(c.conns, newConn(url, url))
+		}
+	}
+
+	// Perform an initial health check and
+	// ensure that we have at least one connection available
+	if c.healthcheckEnabled {
+		c.healthcheck(c.healthcheckTimeoutStartup, true)
+	}
+	if err := c.mustActiveConn(); err != nil {
+		return nil, err
+	}
+
+	if c.snifferEnabled {
+		go c.sniffer() // periodically update cluster information
+	}
+	if c.healthcheckEnabled {
+		go c.healthchecker() // start goroutine periodically ping all nodes of the cluster
+	}
+	c.mu.Lock()
+	c.running = true
+	c.mu.Unlock()
+
+	return c, nil
+}
+
+// SetHttpClient can be used to specify the http.Client to use when making
+// HTTP requests to Elasticsearch.
+func SetHttpClient(httpClient *http.Client) ClientOptionFunc {
+	return func(c *Client) error {
+		if httpClient != nil {
+			c.c = httpClient
+		} else {
+			c.c = http.DefaultClient
+		}
+		return nil
+	}
+}
+
+// SetURL defines the URL endpoints of the Elasticsearch nodes. Notice that
+// when sniffing is enabled, these URLs are used to initially sniff the
+// cluster on startup.
+func SetURL(urls ...string) ClientOptionFunc {
+	return func(c *Client) error {
+		switch len(urls) {
+		case 0:
+			c.urls = []string{DefaultURL}
+		default:
+			c.urls = urls
+		}
+		return nil
+	}
+}
+
+// SetScheme sets the HTTP scheme to look for when sniffing (http or https).
+// This is http by default.
+func SetScheme(scheme string) ClientOptionFunc {
+	return func(c *Client) error {
+		c.scheme = scheme
+		return nil
+	}
+}
+
+// SetSniff enables or disables the sniffer (enabled by default).
+func SetSniff(enabled bool) ClientOptionFunc {
+	return func(c *Client) error {
+		c.snifferEnabled = enabled
+		return nil
+	}
+}
+
+// SetSnifferTimeoutStartup sets the timeout for the sniffer that is used
+// when creating a new client. The default is 5 seconds. Notice that the
+// timeout being used for subsequent sniffing processes is set with
+// SetSnifferTimeout.
+func SetSnifferTimeoutStartup(timeout time.Duration) ClientOptionFunc {
+	return func(c *Client) error {
+		c.snifferTimeoutStartup = timeout
+		return nil
+	}
+}
+
+// SetSnifferTimeout sets the timeout for the sniffer that finds the
+// nodes in a cluster. The default is 2 seconds. Notice that the timeout
+// used when creating a new client on startup is usually greater and can
+// be set with SetSnifferTimeoutStartup.
+func SetSnifferTimeout(timeout time.Duration) ClientOptionFunc {
+	return func(c *Client) error {
+		c.snifferTimeout = timeout
+		return nil
+	}
+}
+
+// SetSnifferInterval sets the interval between two sniffing processes.
+// The default interval is 15 minutes.
+func SetSnifferInterval(interval time.Duration) ClientOptionFunc {
+	return func(c *Client) error {
+		c.snifferInterval = interval
+		return nil
+	}
+}
+
+// SetHealthcheck enables or disables healthchecks (enabled by default).
+func SetHealthcheck(enabled bool) ClientOptionFunc {
+	return func(c *Client) error {
+		c.healthcheckEnabled = enabled
+		return nil
+	}
+}
+
+// SetHealthcheckTimeoutStartup sets the timeout for the initial health check.
+// The default timeout is 5 seconds (see DefaultHealthcheckTimeoutStartup).
+// Notice that timeouts for subsequent health checks can be modified with
+// SetHealthcheckTimeout.
+func SetHealthcheckTimeoutStartup(timeout time.Duration) ClientOptionFunc {
+	return func(c *Client) error {
+		c.healthcheckTimeoutStartup = timeout
+		return nil
+	}
+}
+
+// SetHealthcheckTimeout sets the timeout for periodic health checks.
+// The default timeout is 1 second (see DefaultHealthcheckTimeout).
+// Notice that a different (usually larger) timeout is used for the initial
+// healthcheck, which is initiated while creating a new client.
+// The startup timeout can be modified with SetHealthcheckTimeoutStartup.
+func SetHealthcheckTimeout(timeout time.Duration) ClientOptionFunc {
+	return func(c *Client) error {
+		c.healthcheckTimeout = timeout
+		return nil
+	}
+}
+
+// SetHealthcheckInterval sets the interval between two health checks.
+// The default interval is 60 seconds.
+func SetHealthcheckInterval(interval time.Duration) ClientOptionFunc {
+	return func(c *Client) error {
+		c.healthcheckInterval = interval
+		return nil
+	}
+}
+
+// SetMaxRetries sets the maximum number of retries before giving up when
+// performing an HTTP request to Elasticsearch.
+func SetMaxRetries(maxRetries int) func(*Client) error {
+	return func(c *Client) error {
+		if maxRetries < 0 {
+			return errors.New("MaxRetries must be greater than or equal to 0")
+		}
+		c.maxRetries = maxRetries
+		return nil
+	}
+}
+
+// SetDecoder sets the Decoder to use when decoding data from Elasticsearch.
+// DefaultDecoder is used by default.
+func SetDecoder(decoder Decoder) func(*Client) error {
+	return func(c *Client) error {
+		if decoder != nil {
+			c.decoder = decoder
+		} else {
+			c.decoder = &DefaultDecoder{}
+		}
+		return nil
+	}
+}
+
+// SetErrorLog sets the logger for critical messages like nodes joining
+// or leaving the cluster or failing requests. It is nil by default.
+func SetErrorLog(logger *log.Logger) func(*Client) error {
+	return func(c *Client) error {
+		c.errorlog = logger
+		return nil
+	}
+}
+
+// SetInfoLog sets the logger for informational messages, e.g. requests
+// and their response times. It is nil by default.
+func SetInfoLog(logger *log.Logger) func(*Client) error {
+	return func(c *Client) error {
+		c.infolog = logger
+		return nil
+	}
+}
+
+// SetTraceLog specifies the log.Logger to use for output of HTTP requests
+// and responses which is helpful during debugging. It is nil by default.
+func SetTraceLog(logger *log.Logger) func(*Client) error {
+	return func(c *Client) error {
+		c.tracelog = logger
+		return nil
+	}
+}
+
+// String returns a string representation of the client status.
+func (c *Client) String() string {
+	c.connsMu.Lock()
+	conns := c.conns
+	c.connsMu.Unlock()
+
+	var buf bytes.Buffer
+	for i, conn := range conns {
+		if i > 0 {
+			buf.WriteString(", ")
+		}
+		buf.WriteString(conn.String())
+	}
+	return buf.String()
+}
+
+// IsRunning returns true if the background processes of the client are
+// running, false otherwise.
+func (c *Client) IsRunning() bool {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	return c.running
+}
+
+// Start starts the background processes like sniffing the cluster and
+// periodic health checks. You don't need to run Start when creating a
+// client with NewClient; the background processes are run by default.
+//
+// If the background processes are already running, this is a no-op.
+func (c *Client) Start() {
+	c.mu.RLock()
+	if c.running {
+		c.mu.RUnlock()
+		return
+	}
+	c.mu.RUnlock()
+
+	if c.snifferEnabled {
+		go c.sniffer()
+	}
+	if c.healthcheckEnabled {
+		go c.healthchecker()
+	}
+
+	c.mu.Lock()
+	c.running = true
+	c.mu.Unlock()
+
+	c.infof("elastic: client started")
+}
+
+// Stop stops the background processes that the client is running,
+// i.e. sniffing the cluster periodically and running health checks
+// on the nodes.
+//
+// If the background processes are not running, this is a no-op.
+func (c *Client) Stop() {
+	c.mu.RLock()
+	if !c.running {
+		c.mu.RUnlock()
+		return
+	}
+	c.mu.RUnlock()
+
+	if c.healthcheckEnabled {
+		c.healthcheckStop <- true
+		<-c.healthcheckStop
+	}
+
+	if c.snifferEnabled {
+		c.snifferStop <- true
+		<-c.snifferStop
+	}
+
+	c.mu.Lock()
+	c.running = false
+	c.mu.Unlock()
+
+	c.infof("elastic: client stopped")
+}
+
+// errorf logs to the error log.
+func (c *Client) errorf(format string, args ...interface{}) {
+	if c.errorlog != nil {
+		c.errorlog.Printf(format, args...)
+	}
+}
+
+// infof logs informational messages.
+func (c *Client) infof(format string, args ...interface{}) {
+	if c.infolog != nil {
+		c.infolog.Printf(format, args...)
+	}
+}
+
+// tracef logs to the trace log.
+func (c *Client) tracef(format string, args ...interface{}) {
+	if c.tracelog != nil {
+		c.tracelog.Printf(format, args...)
+	}
+}
+
+// dumpRequest dumps the given HTTP request to the trace log.
+func (c *Client) dumpRequest(r *http.Request) {
+	if c.tracelog != nil {
+		out, err := httputil.DumpRequestOut(r, true)
+		if err == nil {
+			c.tracef("%s\n", string(out))
+		}
+	}
+}
+
+// dumpResponse dumps the given HTTP response to the trace log.
+func (c *Client) dumpResponse(resp *http.Response) {
+	if c.tracelog != nil {
+		out, err := httputil.DumpResponse(resp, true)
+		if err == nil {
+			c.tracef("%s\n", string(out))
+		}
+	}
+}
+
+// sniffer periodically runs sniff.
+func (c *Client) sniffer() {
+	c.mu.RLock()
+	timeout := c.snifferTimeout
+	interval := c.snifferInterval
+	c.mu.RUnlock()
+
+	ticker := time.NewTicker(interval)
+	defer ticker.Stop()
+	for {
+		select {
+		case <-c.snifferStop:
+			// we are asked to stop, so we signal back that we're stopping now
+			c.snifferStop <- true
+			return
+		case <-ticker.C:
+			c.sniff(timeout)
+		}
+	}
+}
+
+// sniff uses the Nodes Info API to return the list of nodes in the cluster.
+// It uses the list of URLs passed on startup plus the list of URLs found
+// by the preceding sniffing process (if sniffing is enabled).
+//
+// If sniffing is disabled, this is a no-op.
+func (c *Client) sniff(timeout time.Duration) error {
+	c.mu.RLock()
+	if !c.snifferEnabled {
+		c.mu.RUnlock()
+		return nil
+	}
+
+	// Use all available URLs provided to sniff the cluster.
+	urlsMap := make(map[string]bool)
+	urls := make([]string, 0)
+
+	// Add all URLs provided on startup
+	for _, url := range c.urls {
+		urlsMap[url] = true
+		urls = append(urls, url)
+	}
+	c.mu.RUnlock()
+
+	// Add all URLs found by sniffing
+	c.connsMu.RLock()
+	for _, conn := range c.conns {
+		if !conn.IsDead() {
+			url := conn.URL()
+			if _, found := urlsMap[url]; !found {
+				urls = append(urls, url)
+			}
+		}
+	}
+	c.connsMu.RUnlock()
+
+	if len(urls) == 0 {
+		return ErrNoClient
+	}
+
+	// Start sniffing on all found URLs
+	ch := make(chan []*conn, len(urls))
+	for _, url := range urls {
+		go func(url string) { ch <- c.sniffNode(url) }(url)
+	}
+
+	// Wait for the results to come back, or the process times out.
+	for {
+		select {
+		case conns := <-ch:
+			if len(conns) > 0 {
+				c.updateConns(conns)
+				return nil
+			}
+		case <-time.After(timeout):
+			// We get here if no cluster responds in time
+			return ErrNoClient
+		}
+	}
+}
+
+// reSniffHostAndPort is used to extract hostname and port from a result
+// from a Nodes Info API (example: "inet[/127.0.0.1:9200]").
+var reSniffHostAndPort = regexp.MustCompile(`\/([^:]*):([0-9]+)\]`)
+
+// sniffNode sniffs a single node. This method is run as a goroutine
+// in sniff. If successful, it returns the list of node URLs extracted
+// from the result of calling Nodes Info API. Otherwise, an empty array
+// is returned.
+func (c *Client) sniffNode(url string) []*conn {
+	nodes := make([]*conn, 0)
+
+	// Call the Nodes Info API at /_nodes/http
+	req, err := NewRequest("GET", url+"/_nodes/http")
+	if err != nil {
+		return nodes
+	}
+
+	res, err := c.c.Do((*http.Request)(req))
+	if err != nil {
+		return nodes
+	}
+	if res == nil {
+		return nodes
+	}
+
+	if res.Body != nil {
+		defer res.Body.Close()
+	}
+
+	var info NodesInfoResponse
+	if err := json.NewDecoder(res.Body).Decode(&info); err == nil {
+		if len(info.Nodes) > 0 {
+			switch c.scheme {
+			case "https":
+				for nodeID, node := range info.Nodes {
+					m := reSniffHostAndPort.FindStringSubmatch(node.HTTPSAddress)
+					if len(m) == 3 {
+						url := fmt.Sprintf("https://%s:%s", m[1], m[2])
+						nodes = append(nodes, newConn(nodeID, url))
+					}
+				}
+			default:
+				for nodeID, node := range info.Nodes {
+					m := reSniffHostAndPort.FindStringSubmatch(node.HTTPAddress)
+					if len(m) == 3 {
+						url := fmt.Sprintf("http://%s:%s", m[1], m[2])
+						nodes = append(nodes, newConn(nodeID, url))
+					}
+				}
+			}
+		}
+	}
+	return nodes
+}
+
+// updateConns updates the client's connections with new information
+// gathered by a sniff operation.
+func (c *Client) updateConns(conns []*conn) {
+	c.connsMu.Lock()
+
+	newConns := make([]*conn, 0)
+
+	// Build up new connections:
+	// If we find an existing connection, use that (including no. of failures etc.).
+	// If we find a new connection, add it.
+	for _, conn := range conns {
+		var found bool
+		for _, oldConn := range c.conns {
+			if oldConn.NodeID() == conn.NodeID() {
+				// Take over the old connection
+				newConns = append(newConns, oldConn)
+				found = true
+				break
+			}
+		}
+		if !found {
+			// New connection didn't exist, so add it to our list of new conns.
+			c.errorf("elastic: %s joined the cluster", conn.URL())
+			newConns = append(newConns, conn)
+		}
+	}
+
+	c.conns = newConns
+	c.cindex = -1
+	c.connsMu.Unlock()
+}
+
+// healthchecker periodically runs healthcheck.
+func (c *Client) healthchecker() {
+	c.mu.RLock()
+	timeout := c.healthcheckTimeout
+	interval := c.healthcheckInterval
+	c.mu.RUnlock()
+
+	ticker := time.NewTicker(interval)
+	defer ticker.Stop()
+	for {
+		select {
+		case <-c.healthcheckStop:
+			// we are asked to stop, so we signal back that we're stopping now
+			c.healthcheckStop <- true
+			return
+		case <-ticker.C:
+			c.healthcheck(timeout, false)
+		}
+	}
+}
+
+// healthcheck does a health check on all nodes in the cluster. Depending on
+// the node state, it marks each connection as dead or alive.
+// If healthchecks are disabled and force is false, this is a no-op.
+// The timeout specifies how long to wait for a response from Elasticsearch.
+func (c *Client) healthcheck(timeout time.Duration, force bool) {
+	c.mu.RLock()
+	if !c.healthcheckEnabled && !force {
+		c.mu.RUnlock()
+		return
+	}
+	c.mu.RUnlock()
+
+	c.connsMu.RLock()
+	conns := c.conns
+	c.connsMu.RUnlock()
+
+	timeoutInMillis := int64(timeout / time.Millisecond)
+
+	for _, conn := range conns {
+		params := make(url.Values)
+		params.Set("timeout", fmt.Sprintf("%dms", timeoutInMillis))
+		req, err := NewRequest("HEAD", conn.URL()+"/?"+params.Encode())
+		if err == nil {
+			res, err := c.c.Do((*http.Request)(req))
+			if err == nil {
+				if res.Body != nil {
+					defer res.Body.Close()
+				}
+				if res.StatusCode >= 200 && res.StatusCode < 300 {
+					conn.MarkAsAlive()
+				} else {
+					conn.MarkAsDead()
+					c.errorf("elastic: %s is dead [status=%d]", conn.URL(), res.StatusCode)
+				}
+			} else {
+				c.errorf("elastic: %s is dead", conn.URL())
+				conn.MarkAsDead()
+			}
+		} else {
+			c.errorf("elastic: %s is dead", conn.URL())
+			conn.MarkAsDead()
+		}
+	}
+}
+
+// next returns the next available connection, or ErrNoClient.
+func (c *Client) next() (*conn, error) {
+	// We do round-robin here.
+	// TODO(oe) This should be a pluggable strategy, like the Selector in the official clients.
+	c.connsMu.Lock()
+	defer c.connsMu.Unlock()
+
+	i := 0
+	numConns := len(c.conns)
+	for {
+		i += 1
+		if i > numConns {
+			break // we visited all conns: they all seem to be dead
+		}
+		c.cindex += 1
+		if c.cindex >= numConns {
+			c.cindex = 0
+		}
+		conn := c.conns[c.cindex]
+		if !conn.IsDead() {
+			return conn, nil
+		}
+	}
+
+	// TODO(oe) As a last resort, we could try to awake a dead connection here.
+
+	// We tried hard, but there is no node available
+	return nil, ErrNoClient
+}
+
+// mustActiveConn returns nil if there is an active connection,
+// otherwise ErrNoClient is returned.
+func (c *Client) mustActiveConn() error {
+	c.connsMu.Lock()
+	defer c.connsMu.Unlock()
+
+	for _, c := range c.conns {
+		if !c.IsDead() {
+			return nil
+		}
+	}
+	return ErrNoClient
+}
+
+// PerformRequest performs an HTTP request to Elasticsearch.
+// It returns a response and an error on failure.
+func (c *Client) PerformRequest(method, path string, params url.Values, body interface{}) (*Response, error) {
+	start := time.Now().UTC()
+
+	c.mu.RLock()
+	timeout := c.healthcheckTimeout
+	retries := c.maxRetries
+	c.mu.RUnlock()
+
+	var err error
+	var conn *conn
+	var req *Request
+	var resp *Response
+	var retried bool
+
+	// We wait between retries, using simple exponential back-off.
+	// TODO: Make this configurable, including the jitter.
+	retryWaitMsec := int64(100 + (rand.Intn(20) - 10))
+
+	for {
+		pathWithParams := path
+		if len(params) > 0 {
+			pathWithParams += "?" + params.Encode()
+		}
+
+		// Get a connection
+		conn, err = c.next()
+		if err == ErrNoClient {
+			if !retried {
+				// Force a healthcheck as all connections seem to be dead.
+				c.healthcheck(timeout, false)
+			}
+			retries -= 1
+			if retries <= 0 {
+				return nil, err
+			}
+			retried = true
+			time.Sleep(time.Duration(retryWaitMsec) * time.Millisecond)
+			retryWaitMsec += retryWaitMsec
+			continue // try again
+		}
+		if err != nil {
+			c.errorf("elastic: cannot get connection from pool")
+			return nil, err
+		}
+
+		req, err = NewRequest(method, conn.URL()+pathWithParams)
+		if err != nil {
+			c.errorf("elastic: cannot create request for %s %s: %v", strings.ToUpper(method), conn.URL()+pathWithParams, err)
+			return nil, err
+		}
+
+		// Set body
+		if body != nil {
+			switch b := body.(type) {
+			case string:
+				req.SetBodyString(b)
+				break
+			default:
+				req.SetBodyJson(body)
+				break
+			}
+		}
+
+		// Tracing
+		c.dumpRequest((*http.Request)(req))
+
+		// Get response
+		res, err := c.c.Do((*http.Request)(req))
+		if err != nil {
+			retries -= 1
+			if retries <= 0 {
+				c.errorf("elastic: %s is dead", conn.URL())
+				conn.MarkAsDead()
+				return nil, err
+			}
+			retried = true
+			time.Sleep(time.Duration(retryWaitMsec) * time.Millisecond)
+			retryWaitMsec += retryWaitMsec
+			continue // try again
+		}
+		if res.Body != nil {
+			defer res.Body.Close()
+		}
+
+		// Check for errors
+		if err := checkResponse(res); err != nil {
+			retries -= 1
+			if retries <= 0 {
+				return nil, err
+			}
+			retried = true
+			time.Sleep(time.Duration(retryWaitMsec) * time.Millisecond)
+			retryWaitMsec += retryWaitMsec
+			continue // try again
+		}
+
+		// Tracing
+		c.dumpResponse(res)
+
+		// We successfully made a request with this connection
+		conn.MarkAsHealthy()
+
+		resp, err = c.newResponse(res)
+		if err != nil {
+			return nil, err
+		}
+
+		break
+	}
+
+	duration := time.Now().UTC().Sub(start)
+	c.infof("%s %s [status:%d, request:%.3fs]",
+		strings.ToUpper(method),
+		req.URL,
+		resp.StatusCode,
+		float64(int64(duration/time.Millisecond))/1000)
+
+	return resp, nil
+}
+
+// ElasticsearchVersion returns the version number of Elasticsearch
+// running on the given URL.
+func (c *Client) ElasticsearchVersion(url string) (string, error) {
+	res, _, err := c.Ping().URL(url).Do()
+	if err != nil {
+		return "", err
+	}
+	return res.Version.Number, nil
+}
+
+// IndexNames returns the names of all indices in the cluster.
+func (c *Client) IndexNames() ([]string, error) {
+	res, err := c.IndexGetSettings().Index("_all").Do()
+	if err != nil {
+		return nil, err
+	}
+	var names []string
+	for name := range res {
+		names = append(names, name)
+	}
+	return names, nil
+}
+
+// Ping checks if a given node in a cluster exists and (optionally)
+// returns some basic information about the Elasticsearch server,
+// e.g. the Elasticsearch version number.
+func (c *Client) Ping() *PingService {
+	return NewPingService(c)
+}
+
+// CreateIndex returns a service to create a new index.
+func (c *Client) CreateIndex(name string) *CreateIndexService {
+	builder := NewCreateIndexService(c)
+	builder.Index(name)
+	return builder
+}
+
+// DeleteIndex returns a service to delete an index.
+func (c *Client) DeleteIndex(name string) *DeleteIndexService {
+	builder := NewDeleteIndexService(c)
+	builder.Index(name)
+	return builder
+}
+
+// IndexExists checks whether an index exists.
+func (c *Client) IndexExists(name string) *IndexExistsService {
+	builder := NewIndexExistsService(c)
+	builder.Index(name)
+	return builder
+}
+
+// TypeExists checks whether one or more types exist in one or more indices.
+func (c *Client) TypeExists() *IndicesExistsTypeService {
+	return NewIndicesExistsTypeService(c)
+}
+
+// IndexStats provides statistics on different operations happening
+// in one or more indices.
+func (c *Client) IndexStats(indices ...string) *IndicesStatsService {
+	builder := NewIndicesStatsService(c)
+	builder = builder.Index(indices...)
+	return builder
+}
+
+// OpenIndex opens an index.
+func (c *Client) OpenIndex(name string) *OpenIndexService {
+	builder := NewOpenIndexService(c)
+	builder.Index(name)
+	return builder
+}
+
+// CloseIndex closes an index.
+func (c *Client) CloseIndex(name string) *CloseIndexService {
+	builder := NewCloseIndexService(c)
+	builder.Index(name)
+	return builder
+}
+
+// Index a document.
+func (c *Client) Index() *IndexService {
+	builder := NewIndexService(c)
+	return builder
+}
+
+// IndexGet retrieves information about one or more indices.
+// IndexGet is only available for Elasticsearch 1.4 or later.
+func (c *Client) IndexGet() *IndicesGetService {
+	builder := NewIndicesGetService(c)
+	return builder
+}
+
+// IndexGetSettings retrieves settings about one or more indices.
+func (c *Client) IndexGetSettings() *IndicesGetSettingsService {
+	builder := NewIndicesGetSettingsService(c)
+	return builder
+}
+
+// Update a document.
+func (c *Client) Update() *UpdateService {
+	builder := NewUpdateService(c)
+	return builder
+}
+
+// Delete a document.
+func (c *Client) Delete() *DeleteService {
+	builder := NewDeleteService(c)
+	return builder
+}
+
+// DeleteByQuery deletes documents as found by a query.
+func (c *Client) DeleteByQuery() *DeleteByQueryService {
+	builder := NewDeleteByQueryService(c)
+	return builder
+}
+
+// Get a document.
+func (c *Client) Get() *GetService {
+	builder := NewGetService(c)
+	return builder
+}
+
+// MultiGet retrieves multiple documents in one roundtrip.
+func (c *Client) MultiGet() *MultiGetService {
+	builder := NewMultiGetService(c)
+	return builder
+}
+
+// Exists checks if a document exists.
+func (c *Client) Exists() *ExistsService {
+	builder := NewExistsService(c)
+	return builder
+}
+
+// Count documents.
+func (c *Client) Count(indices ...string) *CountService {
+	builder := NewCountService(c)
+	builder.Indices(indices...)
+	return builder
+}
+
+// Search is the entry point for searches.
+func (c *Client) Search(indices ...string) *SearchService {
+	builder := NewSearchService(c)
+	builder.Indices(indices...)
+	return builder
+}
+
+// Percolate sends a document and returns the queries that match it.
+// See http://www.elastic.co/guide/en/elasticsearch/reference/current/search-percolate.html.
+func (c *Client) Percolate() *PercolateService {
+	builder := NewPercolateService(c)
+	return builder
+}
+
+// MultiSearch is the entry point for multi searches.
+func (c *Client) MultiSearch() *MultiSearchService {
+	return NewMultiSearchService(c)
+}
+
+// Suggest returns a service to return suggestions.
+func (c *Client) Suggest(indices ...string) *SuggestService {
+	builder := NewSuggestService(c)
+	builder.Indices(indices...)
+	return builder
+}
+
+// Scan through documents. Use this to iterate inside a server process
+// where the results will be processed without returning them to a client.
+func (c *Client) Scan(indices ...string) *ScanService {
+	builder := NewScanService(c)
+	builder.Indices(indices...)
+	return builder
+}
+
+// Scroll through documents. Use this to efficiently scroll through results
+// while returning the results to a client. Use Scan when you don't need
+// to return results to a client (i.e. not paginating via request/response).
+func (c *Client) Scroll(indices ...string) *ScrollService {
+	builder := NewScrollService(c)
+	builder.Indices(indices...)
+	return builder
+}
+
+// ClearScroll can be used to clear search contexts manually.
+func (c *Client) ClearScroll() *ClearScrollService {
+	builder := NewClearScrollService(c)
+	return builder
+}
+
+// Optimize asks Elasticsearch to optimize one or more indices.
+func (c *Client) Optimize(indices ...string) *OptimizeService {
+	builder := NewOptimizeService(c)
+	builder.Indices(indices...)
+	return builder
+}
+
+// Refresh asks Elasticsearch to refresh one or more indices.
+func (c *Client) Refresh(indices ...string) *RefreshService {
+	builder := NewRefreshService(c)
+	builder.Indices(indices...)
+	return builder
+}
+
+// Flush asks Elasticsearch to free memory from the index and
+// flush data to disk.
+func (c *Client) Flush() *FlushService {
+	builder := NewFlushService(c)
+	return builder
+}
+
+// Explain computes a score explanation for a query and a specific document.
+func (c *Client) Explain(index, typ, id string) *ExplainService {
+	builder := NewExplainService(c)
+	builder = builder.Index(index).Type(typ).Id(id)
+	return builder
+}
+
+// Bulk is the entry point to mass insert/update/delete documents.
+func (c *Client) Bulk() *BulkService {
+	builder := NewBulkService(c)
+	return builder
+}
+
+// Alias enables the caller to add and/or remove aliases.
+func (c *Client) Alias() *AliasService {
+	builder := NewAliasService(c)
+	return builder
+}
+
+// Aliases returns aliases by index name(s).
+func (c *Client) Aliases() *AliasesService {
+	builder := NewAliasesService(c)
+	return builder
+}
+
+// GetTemplate gets a search template.
+// Use IndexXXXTemplate funcs to manage index templates.
+func (c *Client) GetTemplate() *GetTemplateService {
+	return NewGetTemplateService(c)
+}
+
+// PutTemplate creates or updates a search template.
+// Use IndexXXXTemplate funcs to manage index templates.
+func (c *Client) PutTemplate() *PutTemplateService {
+	return NewPutTemplateService(c)
+}
+
+// DeleteTemplate deletes a search template.
+// Use IndexXXXTemplate funcs to manage index templates.
+func (c *Client) DeleteTemplate() *DeleteTemplateService {
+	return NewDeleteTemplateService(c)
+}
+
+// IndexGetTemplate gets an index template.
+// Use XXXTemplate funcs to manage search templates.
+func (c *Client) IndexGetTemplate(names ...string) *IndicesGetTemplateService {
+	builder := NewIndicesGetTemplateService(c)
+	builder = builder.Name(names...)
+	return builder
+}
+
+// IndexTemplateExists checks whether an index template exists.
+// Use XXXTemplate funcs to manage search templates.
+func (c *Client) IndexTemplateExists(name string) *IndicesExistsTemplateService {
+	builder := NewIndicesExistsTemplateService(c)
+	builder = builder.Name(name)
+	return builder
+}
+
+// IndexPutTemplate creates or updates an index template.
+// Use XXXTemplate funcs to manage search templates.
+func (c *Client) IndexPutTemplate(name string) *IndicesPutTemplateService {
+	builder := NewIndicesPutTemplateService(c)
+	builder = builder.Name(name)
+	return builder
+}
+
+// IndexDeleteTemplate deletes an index template.
+// Use XXXTemplate funcs to manage search templates.
+func (c *Client) IndexDeleteTemplate(name string) *IndicesDeleteTemplateService {
+	builder := NewIndicesDeleteTemplateService(c)
+	builder = builder.Name(name)
+	return builder
+}
+
+// GetMapping gets a mapping.
+func (c *Client) GetMapping() *GetMappingService {
+	return NewGetMappingService(c)
+}
+
+// PutMapping registers a mapping.
+func (c *Client) PutMapping() *PutMappingService {
+	return NewPutMappingService(c)
+}
+
+// DeleteMapping deletes a mapping.
+func (c *Client) DeleteMapping() *DeleteMappingService {
+	return NewDeleteMappingService(c)
+}
+
+// ClusterHealth retrieves the health of the cluster.
+func (c *Client) ClusterHealth() *ClusterHealthService {
+	return NewClusterHealthService(c)
+}
+
+// ClusterState retrieves the state of the cluster.
+func (c *Client) ClusterState() *ClusterStateService {
+	return NewClusterStateService(c)
+}
+
+// NodesInfo retrieves one or more or all of the cluster nodes information.
+func (c *Client) NodesInfo() *NodesInfoService {
+	return NewNodesInfoService(c)
+}
+
+// Reindex returns a service that will reindex documents from a source
+// index into a target index. See
+// http://www.elastic.co/guide/en/elasticsearch/guide/current/reindex.html
+// for more information about reindexing.
+func (c *Client) Reindex(sourceIndex, targetIndex string) *Reindexer {
+	return NewReindexer(c, sourceIndex, targetIndex)
+}
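
For orientation, here is how the options defined above compose in practice (a sketch under assumptions: the node URLs are placeholders for your own cluster, and the intervals are illustrative, not recommendations):

```go
package main

import (
	"log"
	"os"
	"time"

	"github.com/olivere/elastic"
)

func main() {
	// Placeholder node URLs; point these at your own cluster.
	client, err := elastic.NewClient(
		elastic.SetURL("http://10.0.0.1:9200", "http://10.0.0.2:9200"),
		elastic.SetSniff(true),                         // discover nodes via the Nodes Info API
		elastic.SetSnifferInterval(10*time.Minute),     // re-sniff every 10 minutes
		elastic.SetHealthcheckInterval(30*time.Second), // ping nodes every 30 seconds
		elastic.SetMaxRetries(3),                       // retry failed requests up to 3 times
		elastic.SetErrorLog(log.New(os.Stderr, "ELASTIC ", log.LstdFlags)),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Stop() // stop the sniffer and healthchecker goroutines

	// Ping one of the nodes; the URL is the same placeholder as above.
	res, code, err := client.Ping().URL("http://10.0.0.1:9200").Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("Elasticsearch %s answered with HTTP %d", res.Version.Number, code)
}
```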

+ 611 - 0
sword_base/olivere/elastic.v1/client_test.go

@@ -0,0 +1,611 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"bytes"
+	"encoding/json"
+	"log"
+	"net/http"
+	"regexp"
+	"strings"
+	"testing"
+	"time"
+)
+
+func findConn(s string, slice ...*conn) (int, bool) {
+	for i, t := range slice {
+		if s == t.URL() {
+			return i, true
+		}
+	}
+	return -1, false
+}
+
+// -- NewClient --
+
+func TestClientDefaults(t *testing.T) {
+	client, err := NewClient()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if client.healthcheckEnabled != true {
+		t.Errorf("expected health checks to be enabled, got: %v", client.healthcheckEnabled)
+	}
+	if client.healthcheckTimeoutStartup != DefaultHealthcheckTimeoutStartup {
+		t.Errorf("expected health checks timeout on startup = %v, got: %v", DefaultHealthcheckTimeoutStartup, client.healthcheckTimeoutStartup)
+	}
+	if client.healthcheckTimeout != DefaultHealthcheckTimeout {
+		t.Errorf("expected health checks timeout = %v, got: %v", DefaultHealthcheckTimeout, client.healthcheckTimeout)
+	}
+	if client.healthcheckInterval != DefaultHealthcheckInterval {
+		t.Errorf("expected health checks interval = %v, got: %v", DefaultHealthcheckInterval, client.healthcheckInterval)
+	}
+	if client.snifferEnabled != true {
+		t.Errorf("expected sniffing to be enabled, got: %v", client.snifferEnabled)
+	}
+	if client.snifferTimeoutStartup != DefaultSnifferTimeoutStartup {
+		t.Errorf("expected sniffer timeout on startup = %v, got: %v", DefaultSnifferTimeoutStartup, client.snifferTimeoutStartup)
+	}
+	if client.snifferTimeout != DefaultSnifferTimeout {
+		t.Errorf("expected sniffer timeout = %v, got: %v", DefaultSnifferTimeout, client.snifferTimeout)
+	}
+	if client.snifferInterval != DefaultSnifferInterval {
+		t.Errorf("expected sniffer interval = %v, got: %v", DefaultSnifferInterval, client.snifferInterval)
+	}
+}
+
+func TestClientWithoutURL(t *testing.T) {
+	client, err := NewClient()
+	if err != nil {
+		t.Fatal(err)
+	}
+	// Two things should happen here:
+	// 1. The client starts sniffing the cluster on DefaultURL
+	// 2. The sniffing process should find (at least) one node in the cluster, i.e. the DefaultURL
+	if len(client.conns) == 0 {
+		t.Fatalf("expected at least 1 node in the cluster, got: %d (%v)", len(client.conns), client.conns)
+	}
+	if !isTravis() {
+		if _, found := findConn(DefaultURL, client.conns...); !found {
+			t.Errorf("expected to find node with default URL of %s in %v", DefaultURL, client.conns)
+		}
+	}
+}
+
+func TestClientWithSingleURL(t *testing.T) {
+	client, err := NewClient(SetURL("http://localhost:9200"))
+	if err != nil {
+		t.Fatal(err)
+	}
+	// Two things should happen here:
+	// 1. The client starts sniffing the cluster at the given URL
+	// 2. The sniffing process should find (at least) one node in the cluster, i.e. the DefaultURL
+	if len(client.conns) == 0 {
+		t.Fatalf("expected at least 1 node in the cluster, got: %d (%v)", len(client.conns), client.conns)
+	}
+	if !isTravis() {
+		if _, found := findConn(DefaultURL, client.conns...); !found {
+			t.Errorf("expected to find node with default URL of %s in %v", DefaultURL, client.conns)
+		}
+	}
+}
+
+func TestClientWithMultipleURLs(t *testing.T) {
+	client, err := NewClient(SetURL("http://localhost:9200", "http://localhost:9201"))
+	if err != nil {
+		t.Fatal(err)
+	}
+	// The client should sniff both URLs, but only localhost:9200 should return nodes.
+	if len(client.conns) != 1 {
+		t.Fatalf("expected exactly 1 node in the local cluster, got: %d (%v)", len(client.conns), client.conns)
+	}
+	if !isTravis() {
+		if client.conns[0].URL() != DefaultURL {
+			t.Errorf("expected to find node with default URL of %s in %v", DefaultURL, client.conns)
+		}
+	}
+}
+
+func TestClientSniffSuccess(t *testing.T) {
+	client, err := NewClient(SetURL("http://localhost:19200", "http://localhost:9200"))
+	if err != nil {
+		t.Fatal(err)
+	}
+	// The client should sniff both URLs, but only localhost:9200 should return nodes.
+	if len(client.conns) != 1 {
+		t.Fatalf("expected exactly 1 node in the local cluster, got: %d (%v)", len(client.conns), client.conns)
+	}
+}
+
+func TestClientSniffFailure(t *testing.T) {
+	_, err := NewClient(SetURL("http://localhost:19200", "http://localhost:19201"))
+	if err == nil {
+		t.Fatalf("expected cluster to fail with no nodes found")
+	}
+}
+
+func TestClientSniffDisabled(t *testing.T) {
+	client, err := NewClient(SetSniff(false), SetURL("http://localhost:9200", "http://localhost:9201"))
+	if err != nil {
+		t.Fatal(err)
+	}
+	// The client should not sniff, so it should have two connections.
+	if len(client.conns) != 2 {
+		t.Fatalf("expected 2 nodes, got: %d (%v)", len(client.conns), client.conns)
+	}
+	// Make two requests, so that both connections are being used
+	for i := 0; i < len(client.conns); i++ {
+		_, err = client.Flush().Do()
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+	// The first connection (localhost:9200) should now be okay.
+	if i, found := findConn("http://localhost:9200", client.conns...); !found {
+		t.Fatalf("expected connection to %q to be found", "http://localhost:9200")
+	} else {
+		if conn := client.conns[i]; conn.IsDead() {
+			t.Fatal("expected connection to be alive, but it is dead")
+		}
+	}
+	// The second connection (localhost:9201) should now be marked as dead.
+	if i, found := findConn("http://localhost:9201", client.conns...); !found {
+		t.Fatalf("expected connection to %q to be found", "http://localhost:9201")
+	} else {
+		if conn := client.conns[i]; !conn.IsDead() {
+			t.Fatal("expected connection to be dead, but it is alive")
+		}
+	}
+}
+
+// -- Start and stop --
+
+func TestClientStartAndStop(t *testing.T) {
+	client, err := NewClient()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	running := client.IsRunning()
+	if !running {
+		t.Fatalf("expected background processes to run; got: %v", running)
+	}
+
+	// Stop
+	client.Stop()
+	running = client.IsRunning()
+	if running {
+		t.Fatalf("expected background processes to be stopped; got: %v", running)
+	}
+
+	// Stop again => no-op
+	client.Stop()
+	running = client.IsRunning()
+	if running {
+		t.Fatalf("expected background processes to be stopped; got: %v", running)
+	}
+
+	// Start
+	client.Start()
+	running = client.IsRunning()
+	if !running {
+		t.Fatalf("expected background processes to run; got: %v", running)
+	}
+
+	// Start again => no-op
+	client.Start()
+	running = client.IsRunning()
+	if !running {
+		t.Fatalf("expected background processes to run; got: %v", running)
+	}
+}
+
+// -- Sniffing --
+
+func TestClientSniffNode(t *testing.T) {
+	client, err := NewClient()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	ch := make(chan []*conn)
+	go func() { ch <- client.sniffNode(DefaultURL) }()
+
+	select {
+	case nodes := <-ch:
+		if len(nodes) != 1 {
+			t.Fatalf("expected %d nodes; got: %d", 1, len(nodes))
+		}
+		pattern := `http:\/\/[\d\.]+:9200`
+		matched, err := regexp.MatchString(pattern, nodes[0].URL())
+		if err != nil {
+			t.Fatal(err)
+		}
+		if !matched {
+			t.Fatalf("expected node URL pattern %q; got: %q", pattern, nodes[0].URL())
+		}
+	case <-time.After(2 * time.Second):
+		t.Fatal("expected no timeout in sniff node")
+		break
+	}
+}
+
+func TestClientSniffOnDefaultURL(t *testing.T) {
+	client, _ := NewClient()
+	if client == nil {
+		t.Fatal("no client returned")
+	}
+
+	ch := make(chan error, 1)
+	go func() {
+		ch <- client.sniff(DefaultSnifferTimeoutStartup)
+	}()
+
+	select {
+	case err := <-ch:
+		if err != nil {
+			t.Fatalf("expected sniff to succeed; got: %v", err)
+		}
+		if len(client.conns) != 1 {
+			t.Fatalf("expected %d nodes; got: %d", 1, len(client.conns))
+		}
+		pattern := `http:\/\/[\d\.]+:9200`
+		matched, err := regexp.MatchString(pattern, client.conns[0].URL())
+		if err != nil {
+			t.Fatal(err)
+		}
+		if !matched {
+			t.Fatalf("expected node URL pattern %q; got: %q", pattern, client.conns[0].URL())
+		}
+	case <-time.After(2 * time.Second):
+		t.Fatal("expected no timeout in sniff")
+		break
+	}
+}
+
+// -- Selector --
+
+func TestClientSelectConnHealthy(t *testing.T) {
+	client, err := NewClient(
+		SetSniff(false),
+		SetHealthcheck(false),
+		SetURL("http://127.0.0.1:9200", "http://127.0.0.1:9201"))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Both are healthy, so we should get both URLs in round-robin
+	client.conns[0].MarkAsHealthy()
+	client.conns[1].MarkAsHealthy()
+
+	// #1: Return 1st
+	c, err := client.next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if c.URL() != client.conns[0].URL() {
+		t.Fatalf("expected %s; got: %s", c.URL(), client.conns[0].URL())
+	}
+	// #2: Return 2nd
+	c, err = client.next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if c.URL() != client.conns[1].URL() {
+		t.Fatalf("expected %s; got: %s", c.URL(), client.conns[1].URL())
+	}
+	// #3: Return 1st
+	c, err = client.next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if c.URL() != client.conns[0].URL() {
+		t.Fatalf("expected %s; got: %s", c.URL(), client.conns[0].URL())
+	}
+}
+
+func TestClientSelectConnHealthyAndDead(t *testing.T) {
+	client, err := NewClient(
+		SetSniff(false),
+		SetHealthcheck(false),
+		SetURL("http://127.0.0.1:9200", "http://127.0.0.1:9201"))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// 1st is healthy, second is dead
+	client.conns[0].MarkAsHealthy()
+	client.conns[1].MarkAsDead()
+
+	// #1: Return 1st
+	c, err := client.next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if c.URL() != client.conns[0].URL() {
+		t.Fatalf("expected %s; got: %s", c.URL(), client.conns[0].URL())
+	}
+	// #2: Return 1st again
+	c, err = client.next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if c.URL() != client.conns[0].URL() {
+		t.Fatalf("expected %s; got: %s", c.URL(), client.conns[0].URL())
+	}
+	// #3: Return 1st again and again
+	c, err = client.next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if c.URL() != client.conns[0].URL() {
+		t.Fatalf("expected %s; got: %s", c.URL(), client.conns[0].URL())
+	}
+}
+
+func TestClientSelectConnDeadAndHealthy(t *testing.T) {
+	client, err := NewClient(
+		SetSniff(false),
+		SetHealthcheck(false),
+		SetURL("http://127.0.0.1:9200", "http://127.0.0.1:9201"))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// 1st is dead, 2nd is healthy
+	client.conns[0].MarkAsDead()
+	client.conns[1].MarkAsHealthy()
+
+	// #1: Return 2nd
+	c, err := client.next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if c.URL() != client.conns[1].URL() {
+		t.Fatalf("expected %s; got: %s", c.URL(), client.conns[1].URL())
+	}
+	// #2: Return 2nd again
+	c, err = client.next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if c.URL() != client.conns[1].URL() {
+		t.Fatalf("expected %s; got: %s", c.URL(), client.conns[1].URL())
+	}
+	// #3: Return 2nd again and again
+	c, err = client.next()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if c.URL() != client.conns[1].URL() {
+		t.Fatalf("expected %s; got: %s", c.URL(), client.conns[1].URL())
+	}
+}
+
+func TestClientSelectConnAllDead(t *testing.T) {
+	client, err := NewClient(
+		SetSniff(false),
+		SetHealthcheck(false),
+		SetURL("http://127.0.0.1:9200", "http://127.0.0.1:9201"))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Both are dead
+	client.conns[0].MarkAsDead()
+	client.conns[1].MarkAsDead()
+
+	// #1: Return ErrNoClient
+	c, err := client.next()
+	if err != ErrNoClient {
+		t.Fatal(err)
+	}
+	if c != nil {
+		t.Fatalf("expected no connection; got: %v", c)
+	}
+	// #2: Return ErrNoClient again
+	c, err = client.next()
+	if err != ErrNoClient {
+		t.Fatal(err)
+	}
+	if c != nil {
+		t.Fatalf("expected no connection; got: %v", c)
+	}
+	// #3: Return ErrNoClient again and again
+	c, err = client.next()
+	if err != ErrNoClient {
+		t.Fatal(err)
+	}
+	if c != nil {
+		t.Fatalf("expected no connection; got: %v", c)
+	}
+}
+
+// -- ElasticsearchVersion --
+
+func TestElasticsearchVersion(t *testing.T) {
+	client, err := NewClient()
+	if err != nil {
+		t.Fatal(err)
+	}
+	version, err := client.ElasticsearchVersion(DefaultURL)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if version == "" {
+		t.Errorf("expected a version number, got: %q", version)
+	}
+}
+
+// -- IndexNames --
+
+func TestIndexNames(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+	names, err := client.IndexNames()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(names) == 0 {
+		t.Fatalf("expected some index names, got: %d", len(names))
+	}
+	var found bool
+	for _, name := range names {
+		if name == testIndexName {
+			found = true
+			break
+		}
+	}
+	if !found {
+		t.Fatalf("expected to find index %q; got: %v", testIndexName, found)
+	}
+}
+
+// -- PerformRequest --
+
+func TestPerformRequest(t *testing.T) {
+	client, err := NewClient()
+	if err != nil {
+		t.Fatal(err)
+	}
+	res, err := client.PerformRequest("GET", "/", nil, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if res == nil {
+		t.Fatal("expected response to be != nil")
+	}
+
+	ret := new(PingResult)
+	if err := json.Unmarshal(res.Body, ret); err != nil {
+		t.Fatalf("expected no error on decode; got: %v", err)
+	}
+	if ret.Status != 200 {
+		t.Errorf("expected HTTP status 200; got: %d", ret.Status)
+	}
+}
+
+func TestPerformRequestWithLogger(t *testing.T) {
+	var w bytes.Buffer
+	out := log.New(&w, "LOGGER ", log.LstdFlags)
+
+	client, err := NewClient(SetInfoLog(out))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	res, err := client.PerformRequest("GET", "/", nil, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if res == nil {
+		t.Fatal("expected response to be != nil")
+	}
+
+	ret := new(PingResult)
+	if err := json.Unmarshal(res.Body, ret); err != nil {
+		t.Fatalf("expected no error on decode; got: %v", err)
+	}
+	if ret.Status != 200 {
+		t.Errorf("expected HTTP status 200; got: %d", ret.Status)
+	}
+
+	got := w.String()
+	pattern := `^LOGGER \d{4}/\d{2}/\d{2} \d{2}:\d{2}:\d{2} GET http://.*/ \[status:200, request:\d+\.\d{3}s\]\n`
+	matched, err := regexp.MatchString(pattern, got)
+	if err != nil {
+		t.Fatalf("expected log line to match %q; got: %v", pattern, err)
+	}
+	if !matched {
+		t.Errorf("expected log line to match %q; got: %v", pattern, got)
+	}
+}
+
+func TestPerformRequestWithLoggerAndTracer(t *testing.T) {
+	var lw bytes.Buffer
+	lout := log.New(&lw, "LOGGER ", log.LstdFlags)
+
+	var tw bytes.Buffer
+	tout := log.New(&tw, "TRACER ", log.LstdFlags)
+
+	client, err := NewClient(SetInfoLog(lout), SetTraceLog(tout))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	res, err := client.PerformRequest("GET", "/", nil, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if res == nil {
+		t.Fatal("expected response to be != nil")
+	}
+
+	ret := new(PingResult)
+	if err := json.Unmarshal(res.Body, ret); err != nil {
+		t.Fatalf("expected no error on decode; got: %v", err)
+	}
+	if ret.Status != 200 {
+		t.Errorf("expected HTTP status 200; got: %d", ret.Status)
+	}
+
+	lgot := lw.String()
+	if lgot == "" {
+		t.Errorf("expected logger output; got: %q", lgot)
+	}
+
+	tgot := tw.String()
+	if tgot == "" {
+		t.Errorf("expected tracer output; got: %q", tgot)
+	}
+}
+
+// failingTransport will run a fail callback if it sees a given URL path prefix.
+type failingTransport struct {
+	path string                                      // path prefix to look for
+	fail func(*http.Request) (*http.Response, error) // call when path prefix is found
+	next http.RoundTripper                           // next round-tripper (use http.DefaultTransport if nil)
+}
+
+// RoundTrip implements a failing transport.
+func (tr *failingTransport) RoundTrip(r *http.Request) (*http.Response, error) {
+	if strings.HasPrefix(r.URL.Path, tr.path) && tr.fail != nil {
+		return tr.fail(r)
+	}
+	if tr.next != nil {
+		return tr.next.RoundTrip(r)
+	}
+	return http.DefaultTransport.RoundTrip(r)
+}
+
+func TestPerformRequestWithMaxRetries(t *testing.T) {
+	var numFailedReqs int
+	fail := func(r *http.Request) (*http.Response, error) {
+		numFailedReqs += 1
+		return &http.Response{Request: r, StatusCode: 400}, nil
+	}
+
+	// Run against a failing endpoint and see if PerformRequest
+	// retries correctly.
+	tr := &failingTransport{path: "/fail", fail: fail}
+	httpClient := &http.Client{Transport: tr}
+
+	client, err := NewClient(SetHttpClient(httpClient), SetMaxRetries(5))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	res, err := client.PerformRequest("GET", "/fail", nil, nil)
+	if err == nil {
+		t.Fatal("expected error")
+	}
+	if res != nil {
+		t.Fatal("expected no response")
+	}
+	// Connection should be marked as dead after it failed
+	if numFailedReqs != 5 {
+		t.Errorf("expected %d failed requests; got: %d", 5, numFailedReqs)
+	}
+}
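
Outside the test suite, the knobs exercised by TestPerformRequestWithMaxRetries are typically combined with a request timeout on the underlying http.Client. A sketch (the 5-second timeout and 3 retries are illustrative values, not library defaults):

```go
package main

import (
	"log"
	"net/http"
	"time"

	"github.com/olivere/elastic"
)

func main() {
	// A per-request timeout makes a slow node fail fast, so PerformRequest
	// can move on to the next connection within its retry loop.
	httpClient := &http.Client{Timeout: 5 * time.Second}

	client, err := elastic.NewClient(
		elastic.SetHttpClient(httpClient),
		elastic.SetMaxRetries(3),
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = client // use the client as usual; failed requests are retried with backoff
}
```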

+ 16 - 0
sword_base/olivere/elastic.v1/cluster-test/Makefile

@@ -0,0 +1,16 @@
+.PHONY: build run-omega-cluster-test
+
+default: build
+
+build:
+	go build cluster-test.go
+
+run-omega-cluster-test:
+	go run -race cluster-test.go \
+		-nodes=http://192.168.2.65:8200,http://192.168.2.64:8200 \
+		-n=5 \
+		-retries=5 \
+		-sniff=true -sniffer=10s \
+		-healthcheck=true -healthchecker=5s \
+		-errorlog=errors.log
+

+ 63 - 0
sword_base/olivere/elastic.v1/cluster-test/README.md

@@ -0,0 +1,63 @@
+# Cluster Test
+
+This directory contains a program you can use to test a cluster.
+
+Here's how:
+
+First, install a cluster of Elasticsearch nodes. You can install them on
+different computers, or start several nodes on a single machine.
+
+Build cluster-test by `go build cluster-test.go` (or build with `make`).
+
+Run `./cluster-test -h` to get a list of flags:
+
+```sh
+$ ./cluster-test -h
+Usage of ./cluster-test:
+  -errorlog="": error log file
+  -healthcheck=true: enable or disable healthchecks
+  -healthchecker=1m0s: healthcheck interval
+  -index="twitter": name of ES index to use
+  -infolog="": info log file
+  -n=5: number of goroutines that run searches
+  -nodes="": comma-separated list of ES URLs (e.g. 'http://192.168.2.10:9200,http://192.168.2.11:9200')
+  -retries=0: number of retries
+  -sniff=true: enable or disable sniffer
+  -sniffer=15m0s: sniffer interval
+  -tracelog="": trace log file
+```
+
+Example:
+
+```sh
+$ ./cluster-test -nodes=http://127.0.0.1:9200,http://127.0.0.1:9201,http://127.0.0.1:9202 -n=5 -index=twitter -retries=5 -sniff=true -sniffer=10s -healthcheck=true -healthchecker=5s -errorlog=error.log
+```
+
+The above example will create an index and start some search jobs on the
+cluster defined by http://127.0.0.1:9200, http://127.0.0.1:9201,
+and http://127.0.0.1:9202.
+
+* It will create an index called `twitter` on the cluster (`-index=twitter`)
+* It will run 5 search jobs in parallel (`-n=5`).
+* It will retry failed requests 5 times (`-retries=5`).
+* It will sniff the cluster periodically (`-sniff=true`).
+* It will sniff the cluster every 10 seconds (`-sniffer=10s`).
+* It will perform health checks periodically (`-healthcheck=true`).
+* It will perform health checks on the nodes every 5 seconds (`-healthchecker=5s`).
+* It will write an error log file (`-errorlog=error.log`).
+
+If you want to test Elastic with nodes going up and down, you can use a
+chaos monkey script like this and run it on the nodes of your cluster:
+
+```sh
+#!/bin/bash
+while true
+do
+	echo "Starting ES node"
+	elasticsearch -d -Xmx4g -Xms1g -Des.config=elasticsearch.yml -p es.pid
+	sleep `jot -r 1 10 300` # wait for 10-300s
+	echo "Stopping ES node"
+	kill -TERM `cat es.pid`
+	sleep `jot -r 1 10 60`  # wait for 10-60s
+done
+```

+ 357 - 0
sword_base/olivere/elastic.v1/cluster-test/cluster-test.go

@@ -0,0 +1,357 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package main
+
+import (
+	"encoding/json"
+	"errors"
+	"flag"
+	"fmt"
+	"log"
+	"math/rand"
+	"os"
+	"runtime"
+	"strings"
+	"sync/atomic"
+	"time"
+
+	"github.com/olivere/elastic"
+)
+
+type Tweet struct {
+	User     string                `json:"user"`
+	Message  string                `json:"message"`
+	Retweets int                   `json:"retweets"`
+	Image    string                `json:"image,omitempty"`
+	Created  time.Time             `json:"created,omitempty"`
+	Tags     []string              `json:"tags,omitempty"`
+	Location string                `json:"location,omitempty"`
+	Suggest  *elastic.SuggestField `json:"suggest_field,omitempty"`
+}
+
+var (
+	nodes         = flag.String("nodes", "", "comma-separated list of ES URLs (e.g. 'http://192.168.2.10:9200,http://192.168.2.11:9200')")
+	n             = flag.Int("n", 5, "number of goroutines that run searches")
+	index         = flag.String("index", "twitter", "name of ES index to use")
+	errorlogfile  = flag.String("errorlog", "", "error log file")
+	infologfile   = flag.String("infolog", "", "info log file")
+	tracelogfile  = flag.String("tracelog", "", "trace log file")
+	retries       = flag.Int("retries", elastic.DefaultMaxRetries, "number of retries")
+	sniff         = flag.Bool("sniff", elastic.DefaultSnifferEnabled, "enable or disable sniffer")
+	sniffer       = flag.Duration("sniffer", elastic.DefaultSnifferInterval, "sniffer interval")
+	healthcheck   = flag.Bool("healthcheck", elastic.DefaultHealthcheckEnabled, "enable or disable healthchecks")
+	healthchecker = flag.Duration("healthchecker", elastic.DefaultHealthcheckInterval, "healthcheck interval")
+)
+
+func main() {
+	flag.Parse()
+
+	runtime.GOMAXPROCS(runtime.NumCPU())
+
+	if *nodes == "" {
+		log.Fatal("no nodes specified")
+	}
+	urls := strings.SplitN(*nodes, ",", -1)
+
+	testcase, err := NewTestCase(*index, urls)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	testcase.SetErrorLogFile(*errorlogfile)
+	testcase.SetInfoLogFile(*infologfile)
+	testcase.SetTraceLogFile(*tracelogfile)
+	testcase.SetMaxRetries(*retries)
+	testcase.SetHealthcheck(*healthcheck)
+	testcase.SetHealthcheckInterval(*healthchecker)
+	testcase.SetSniff(*sniff)
+	testcase.SetSnifferInterval(*sniffer)
+
+	if err := testcase.Run(*n); err != nil {
+		log.Fatal(err)
+	}
+
+	select {}
+}
+
+type RunInfo struct {
+	Success bool
+}
+
+type TestCase struct {
+	nodes               []string
+	client              *elastic.Client
+	runs                int64
+	failures            int64
+	runCh               chan RunInfo
+	index               string
+	errorlogfile        string
+	infologfile         string
+	tracelogfile        string
+	maxRetries          int
+	healthcheck         bool
+	healthcheckInterval time.Duration
+	sniff               bool
+	snifferInterval     time.Duration
+}
+
+func NewTestCase(index string, nodes []string) (*TestCase, error) {
+	if index == "" {
+		return nil, errors.New("no index name specified")
+	}
+
+	return &TestCase{
+		index: index,
+		nodes: nodes,
+		runCh: make(chan RunInfo),
+	}, nil
+}
+
+func (t *TestCase) SetIndex(name string) {
+	t.index = name
+}
+
+func (t *TestCase) SetErrorLogFile(name string) {
+	t.errorlogfile = name
+}
+
+func (t *TestCase) SetInfoLogFile(name string) {
+	t.infologfile = name
+}
+
+func (t *TestCase) SetTraceLogFile(name string) {
+	t.tracelogfile = name
+}
+
+func (t *TestCase) SetMaxRetries(n int) {
+	t.maxRetries = n
+}
+
+func (t *TestCase) SetSniff(enabled bool) {
+	t.sniff = enabled
+}
+
+func (t *TestCase) SetSnifferInterval(d time.Duration) {
+	t.snifferInterval = d
+}
+
+func (t *TestCase) SetHealthcheck(enabled bool) {
+	t.healthcheck = enabled
+}
+
+func (t *TestCase) SetHealthcheckInterval(d time.Duration) {
+	t.healthcheckInterval = d
+}
+
+func (t *TestCase) Run(n int) error {
+	if err := t.setup(); err != nil {
+		return err
+	}
+
+	for i := 0; i < n; i++ {
+		go t.search()
+	}
+
+	go t.monitor()
+
+	return nil
+}
+
+func (t *TestCase) monitor() {
+	print := func() {
+		fmt.Printf("\033[32m%5d\033[0m; \033[31m%5d\033[0m: %s%s\r", t.runs, t.failures, t.client.String(), "    ")
+	}
+
+	for {
+		select {
+		case run := <-t.runCh:
+			atomic.AddInt64(&t.runs, 1)
+			if !run.Success {
+				atomic.AddInt64(&t.failures, 1)
+				fmt.Println()
+			}
+			print()
+		case <-time.After(5 * time.Second):
+			// Print stats after some inactivity
+			print()
+			break
+		}
+	}
+}
+
+func (t *TestCase) setup() error {
+	var errorlogger *log.Logger
+	if t.errorlogfile != "" {
+		f, err := os.OpenFile(t.errorlogfile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0664)
+		if err != nil {
+			return err
+		}
+		errorlogger = log.New(f, "", log.Ltime|log.Lmicroseconds|log.Lshortfile)
+	}
+
+	var infologger *log.Logger
+	if t.infologfile != "" {
+		f, err := os.OpenFile(t.infologfile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0664)
+		if err != nil {
+			return err
+		}
+		infologger = log.New(f, "", log.LstdFlags)
+	}
+
+	// Trace request and response details like this
+	var tracelogger *log.Logger
+	if t.tracelogfile != "" {
+		f, err := os.OpenFile(t.tracelogfile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0664)
+		if err != nil {
+			return err
+		}
+		tracelogger = log.New(f, "", log.LstdFlags)
+	}
+
+	client, err := elastic.NewClient(
+		elastic.SetURL(t.nodes...),
+		elastic.SetErrorLog(errorlogger),
+		elastic.SetInfoLog(infologger),
+		elastic.SetTraceLog(tracelogger),
+		elastic.SetMaxRetries(t.maxRetries),
+		elastic.SetSniff(t.sniff),
+		elastic.SetSnifferInterval(t.snifferInterval),
+		elastic.SetHealthcheck(t.healthcheck),
+		elastic.SetHealthcheckInterval(t.healthcheckInterval))
+	if err != nil {
+		// Handle error
+		return err
+	}
+	t.client = client
+
+	// Use the IndexExists service to check if a specified index exists.
+	exists, err := t.client.IndexExists(t.index).Do()
+	if err != nil {
+		return err
+	}
+	if exists {
+		deleteIndex, err := t.client.DeleteIndex(t.index).Do()
+		if err != nil {
+			return err
+		}
+		if !deleteIndex.Acknowledged {
+			return errors.New("delete index not acknowledged")
+		}
+	}
+
+	// Create a new index.
+	createIndex, err := t.client.CreateIndex(t.index).Do()
+	if err != nil {
+		return err
+	}
+	if !createIndex.Acknowledged {
+		return errors.New("create index not acknowledged")
+	}
+
+	// Index a tweet (using JSON serialization)
+	tweet1 := Tweet{User: "olivere", Message: "Take Five", Retweets: 0}
+	_, err = t.client.Index().
+		Index(t.index).
+		Type("tweet").
+		Id("1").
+		BodyJson(tweet1).
+		Do()
+	if err != nil {
+		return err
+	}
+
+	// Index a second tweet (by string)
+	tweet2 := `{"user" : "olivere", "message" : "It's a Raggy Waltz"}`
+	_, err = t.client.Index().
+		Index(t.index).
+		Type("tweet").
+		Id("2").
+		BodyString(tweet2).
+		Do()
+	if err != nil {
+		return err
+	}
+
+	// Flush to make sure the documents got written.
+	_, err = t.client.Flush().Index(t.index).Do()
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (t *TestCase) search() {
+	// Loop forever to check for connection issues
+	for {
+		// Get tweet with specified ID
+		get1, err := t.client.Get().
+			Index(t.index).
+			Type("tweet").
+			Id("1").
+			Do()
+		if err != nil {
+			//failf("Get failed: %v", err)
+			t.runCh <- RunInfo{Success: false}
+			continue
+		}
+		if !get1.Found {
+			//log.Printf("Document %s not found\n", "1")
+			//fmt.Printf("Got document %s in version %d from index %s, type %s\n", get1.Id, get1.Version, get1.Index, get1.Type)
+			t.runCh <- RunInfo{Success: false}
+			continue
+		}
+
+		// Search with a term query
+		termQuery := elastic.NewTermQuery("user", "olivere")
+		searchResult, err := t.client.Search().
+			Index(t.index).     // search in index t.index
+			Query(&termQuery).  // specify the query
+			Sort("user", true). // sort by "user" field, ascending
+			From(0).Size(10).   // take documents 0-9
+			Pretty(true).       // pretty print request and response JSON
+			Do()                // execute
+		if err != nil {
+			//failf("Search failed: %v\n", err)
+			t.runCh <- RunInfo{Success: false}
+			continue
+		}
+
+		// searchResult is of type SearchResult and returns hits, suggestions,
+		// and all kinds of other information from Elasticsearch.
+		//fmt.Printf("Query took %d milliseconds\n", searchResult.TookInMillis)
+
+		// Number of hits
+		if searchResult.Hits != nil {
+			//fmt.Printf("Found a total of %d tweets\n", searchResult.Hits.TotalHits)
+
+			// Iterate through results
+			for _, hit := range searchResult.Hits.Hits {
+				// hit.Index contains the name of the index
+
+				// Deserialize hit.Source into a Tweet (could also be just a map[string]interface{}).
+				var tweet Tweet
+				err := json.Unmarshal(*hit.Source, &tweet)
+				if err != nil {
+					// Deserialization failed
+					//failf("Deserialize failed: %v\n", err)
+					t.runCh <- RunInfo{Success: false}
+					continue
+				}
+
+				// Work with tweet
+				//fmt.Printf("Tweet by %s: %s\n", tweet.User, tweet.Message)
+			}
+		} else {
+			// No hits
+			//fmt.Print("Found no tweets\n")
+		}
+
+		t.runCh <- RunInfo{Success: true}
+
+		// Sleep some time
+		time.Sleep(time.Duration(rand.Intn(500)) * time.Millisecond)
+	}
+}

+ 185 - 0
sword_base/olivere/elastic.v1/cluster_health.go

@@ -0,0 +1,185 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+	"strings"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
+// ClusterHealthService retrieves the health status of the cluster.
+// It is documented at http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/cluster-health.html.
+type ClusterHealthService struct {
+	client                  *Client
+	pretty                  bool
+	indices                 []string
+	waitForStatus           string
+	level                   string
+	local                   *bool
+	masterTimeout           string
+	timeout                 string
+	waitForActiveShards     *int
+	waitForNodes            string
+	waitForRelocatingShards *int
+}
+
+// NewClusterHealthService creates a new ClusterHealthService.
+func NewClusterHealthService(client *Client) *ClusterHealthService {
+	return &ClusterHealthService{client: client, indices: make([]string, 0)}
+}
+
+// Index limits the information returned to a specific index.
+func (s *ClusterHealthService) Index(index string) *ClusterHealthService {
+	s.indices = make([]string, 0)
+	s.indices = append(s.indices, index)
+	return s
+}
+
+// Indices limits the information returned to specific indices.
+func (s *ClusterHealthService) Indices(indices ...string) *ClusterHealthService {
+	s.indices = make([]string, 0)
+	s.indices = append(s.indices, indices...)
+	return s
+}
+
+// MasterTimeout specifies an explicit operation timeout for connection to master node.
+func (s *ClusterHealthService) MasterTimeout(masterTimeout string) *ClusterHealthService {
+	s.masterTimeout = masterTimeout
+	return s
+}
+
+// Timeout specifies an explicit operation timeout.
+func (s *ClusterHealthService) Timeout(timeout string) *ClusterHealthService {
+	s.timeout = timeout
+	return s
+}
+
+// WaitForActiveShards can be used to wait until the specified number of shards are active.
+func (s *ClusterHealthService) WaitForActiveShards(waitForActiveShards int) *ClusterHealthService {
+	s.waitForActiveShards = &waitForActiveShards
+	return s
+}
+
+// WaitForNodes can be used to wait until the specified number of nodes are available.
+func (s *ClusterHealthService) WaitForNodes(waitForNodes string) *ClusterHealthService {
+	s.waitForNodes = waitForNodes
+	return s
+}
+
+// WaitForRelocatingShards can be used to wait until the number of relocating shards drops to the specified value.
+func (s *ClusterHealthService) WaitForRelocatingShards(waitForRelocatingShards int) *ClusterHealthService {
+	s.waitForRelocatingShards = &waitForRelocatingShards
+	return s
+}
+
+// WaitForStatus can be used to wait until the cluster is in a specific state.
+// Valid values are: green, yellow, or red.
+func (s *ClusterHealthService) WaitForStatus(waitForStatus string) *ClusterHealthService {
+	s.waitForStatus = waitForStatus
+	return s
+}
+
+// Level specifies the level of detail for returned information.
+func (s *ClusterHealthService) Level(level string) *ClusterHealthService {
+	s.level = level
+	return s
+}
+
+// Local indicates whether to return local information. If it is true,
+// we do not retrieve the state from the master node (default: false).
+func (s *ClusterHealthService) Local(local bool) *ClusterHealthService {
+	s.local = &local
+	return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *ClusterHealthService) buildURL() (string, url.Values, error) {
+	// Build URL
+	path, err := uritemplates.Expand("/_cluster/health/{index}", map[string]string{
+		"index": strings.Join(s.indices, ","),
+	})
+	if err != nil {
+		return "", url.Values{}, err
+	}
+
+	// Add query string parameters
+	params := url.Values{}
+	if s.waitForRelocatingShards != nil {
+		params.Set("wait_for_relocating_shards", fmt.Sprintf("%d", *s.waitForRelocatingShards))
+	}
+	if s.waitForStatus != "" {
+		params.Set("wait_for_status", s.waitForStatus)
+	}
+	if s.level != "" {
+		params.Set("level", s.level)
+	}
+	if s.local != nil {
+		params.Set("local", fmt.Sprintf("%v", *s.local))
+	}
+	if s.masterTimeout != "" {
+		params.Set("master_timeout", s.masterTimeout)
+	}
+	if s.timeout != "" {
+		params.Set("timeout", s.timeout)
+	}
+	if s.waitForActiveShards != nil {
+		params.Set("wait_for_active_shards", fmt.Sprintf("%d", *s.waitForActiveShards))
+	}
+	if s.waitForNodes != "" {
+		params.Set("wait_for_nodes", s.waitForNodes)
+	}
+
+	return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *ClusterHealthService) Validate() error {
+	return nil
+}
+
+// Do executes the operation.
+func (s *ClusterHealthService) Do() (*ClusterHealthResponse, error) {
+	// Check pre-conditions
+	if err := s.Validate(); err != nil {
+		return nil, err
+	}
+
+	// Get URL for request
+	path, params, err := s.buildURL()
+	if err != nil {
+		return nil, err
+	}
+
+	// Get HTTP response
+	res, err := s.client.PerformRequest("GET", path, params, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return operation response
+	resp := new(ClusterHealthResponse)
+	if err := json.Unmarshal(res.Body, resp); err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
+// ClusterHealthResponse is the response of ClusterHealthService.Do.
+type ClusterHealthResponse struct {
+	ClusterName         string `json:"cluster_name"`
+	Status              string `json:"status"`
+	TimedOut            bool   `json:"timed_out"`
+	NumberOfNodes       int    `json:"number_of_nodes"`
+	NumberOfDataNodes   int    `json:"number_of_data_nodes"`
+	ActivePrimaryShards int    `json:"active_primary_shards"`
+	ActiveShards        int    `json:"active_shards"`
+	RelocatingShards    int    `json:"relocating_shards"`
+	InitializedShards   int    `json:"initialized_shards"`
+	InitializedShards   int    `json:"initializing_shards"`
+	UnassignedShards    int    `json:"unassigned_shards"`
+}

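A minimal usage sketch for `ClusterHealthService`, assuming a node at `http://127.0.0.1:9200`; the `twitter` index and the `10s` timeout are placeholder values:

```go
package main

import (
	"fmt"
	"log"

	elastic "github.com/olivere/elastic" // import path assumed; this repo vendors it under sword_base/olivere/elastic.v1
)

func main() {
	client, err := elastic.NewClient(elastic.SetURL("http://127.0.0.1:9200"))
	if err != nil {
		log.Fatal(err)
	}

	// Wait up to 10s for the twitter index to reach at least yellow health.
	res, err := client.ClusterHealth().
		Index("twitter").
		WaitForStatus("yellow").
		Timeout("10s").
		Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("cluster %q is %s: %d data nodes, %d unassigned shards\n",
		res.ClusterName, res.Status, res.NumberOfDataNodes, res.UnassignedShards)
}
```
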
+ 74 - 0
sword_base/olivere/elastic.v1/cluster_health_test.go

@@ -0,0 +1,74 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"net/url"
+	"testing"
+)
+
+func TestClusterHealth(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	// Get cluster health
+	res, err := client.ClusterHealth().Index(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if res == nil {
+		t.Fatalf("expected res to be != nil; got: %v", res)
+	}
+	if res.Status != "green" && res.Status != "red" && res.Status != "yellow" {
+		t.Fatalf("expected status \"green\", \"red\", or \"yellow\"; got: %q", res.Status)
+	}
+}
+
+func TestClusterHealthURLs(t *testing.T) {
+	tests := []struct {
+		Service        *ClusterHealthService
+		ExpectedPath   string
+		ExpectedParams url.Values
+	}{
+		{
+			Service: &ClusterHealthService{
+				indices: []string{},
+			},
+			ExpectedPath: "/_cluster/health/",
+		},
+		{
+			Service: &ClusterHealthService{
+				indices: []string{"twitter"},
+			},
+			ExpectedPath: "/_cluster/health/twitter",
+		},
+		{
+			Service: &ClusterHealthService{
+				indices: []string{"twitter", "gplus"},
+			},
+			ExpectedPath: "/_cluster/health/twitter%2Cgplus",
+		},
+		{
+			Service: &ClusterHealthService{
+				indices:       []string{"twitter"},
+				waitForStatus: "yellow",
+			},
+			ExpectedPath:   "/_cluster/health/twitter",
+			ExpectedParams: url.Values{"wait_for_status": []string{"yellow"}},
+		},
+	}
+
+	for _, test := range tests {
+		gotPath, gotParams, err := test.Service.buildURL()
+		if err != nil {
+			t.Fatalf("expected no error; got: %v", err)
+		}
+		if gotPath != test.ExpectedPath {
+			t.Errorf("expected URL path = %q; got: %q", test.ExpectedPath, gotPath)
+		}
+		if gotParams.Encode() != test.ExpectedParams.Encode() {
+			t.Errorf("expected URL params = %v; got: %v", test.ExpectedParams, gotParams)
+		}
+	}
+}

+ 192 - 0
sword_base/olivere/elastic.v1/cluster_state.go

@@ -0,0 +1,192 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+	"strings"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
+// ClusterStateService returns the state of the cluster.
+// It is documented at http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/cluster-state.html.
+type ClusterStateService struct {
+	client        *Client
+	pretty        bool
+	indices       []string
+	metrics       []string
+	local         *bool
+	masterTimeout string
+	flatSettings  *bool
+}
+
+// NewClusterStateService creates a new ClusterStateService.
+func NewClusterStateService(client *Client) *ClusterStateService {
+	return &ClusterStateService{
+		client:  client,
+		indices: make([]string, 0),
+		metrics: make([]string, 0),
+	}
+}
+
+// Index sets the name of the index. Use _all or an empty string to perform
+// the operation on all indices.
+func (s *ClusterStateService) Index(index string) *ClusterStateService {
+	s.indices = make([]string, 0)
+	s.indices = append(s.indices, index)
+	return s
+}
+
+// Indices is a list of index names. Use _all or an empty string to
+// perform the operation on all indices.
+func (s *ClusterStateService) Indices(indices ...string) *ClusterStateService {
+	s.indices = make([]string, 0)
+	s.indices = append(s.indices, indices...)
+	return s
+}
+
+// Metric limits the information returned to the specified metric.
+// It can be one of: version, master_node, nodes, routing_table, metadata,
+// blocks, or customs.
+func (s *ClusterStateService) Metric(metric string) *ClusterStateService {
+	s.metrics = make([]string, 0)
+	s.metrics = append(s.metrics, metric)
+	return s
+}
+
+// Metrics limits the information returned to the specified metrics.
+// It can be any of: version, master_node, nodes, routing_table, metadata,
+// blocks, or customs.
+func (s *ClusterStateService) Metrics(metrics ...string) *ClusterStateService {
+	s.metrics = make([]string, 0)
+	s.metrics = append(s.metrics, metrics...)
+	return s
+}
+
+// Local indicates whether to return local information. If it is true,
+// we do not retrieve the state from the master node (default: false).
+func (s *ClusterStateService) Local(local bool) *ClusterStateService {
+	s.local = &local
+	return s
+}
+
+// MasterTimeout specifies the timeout for connection to master.
+func (s *ClusterStateService) MasterTimeout(masterTimeout string) *ClusterStateService {
+	s.masterTimeout = masterTimeout
+	return s
+}
+
+// FlatSettings indicates whether to return settings in flat format (default: false).
+func (s *ClusterStateService) FlatSettings(flatSettings bool) *ClusterStateService {
+	s.flatSettings = &flatSettings
+	return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *ClusterStateService) buildURL() (string, url.Values, error) {
+	// Build URL
+	metrics := strings.Join(s.metrics, ",")
+	if metrics == "" {
+		metrics = "_all"
+	}
+	indices := strings.Join(s.indices, ",")
+	if indices == "" {
+		indices = "_all"
+	}
+	path, err := uritemplates.Expand("/_cluster/state/{metrics}/{indices}", map[string]string{
+		"metrics": metrics,
+		"indices": indices,
+	})
+	if err != nil {
+		return "", url.Values{}, err
+	}
+
+	// Add query string parameters
+	params := url.Values{}
+	if s.masterTimeout != "" {
+		params.Set("master_timeout", s.masterTimeout)
+	}
+	if s.flatSettings != nil {
+		params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
+	}
+	if s.local != nil {
+		params.Set("local", fmt.Sprintf("%v", *s.local))
+	}
+
+	return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *ClusterStateService) Validate() error {
+	return nil
+}
+
+// Do executes the operation.
+func (s *ClusterStateService) Do() (*ClusterStateResponse, error) {
+	// Check pre-conditions
+	if err := s.Validate(); err != nil {
+		return nil, err
+	}
+
+	// Get URL for request
+	path, params, err := s.buildURL()
+	if err != nil {
+		return nil, err
+	}
+
+	// Get HTTP response
+	res, err := s.client.PerformRequest("GET", path, params, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return operation response
+	ret := new(ClusterStateResponse)
+	if err := json.Unmarshal(res.Body, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// ClusterStateResponse is the response of ClusterStateService.Do.
+type ClusterStateResponse struct {
+	ClusterName  string                               `json:"cluster_name"`
+	Version      int                                  `json:"version"`
+	MasterNode   string                               `json:"master_node"`
+	Blocks       map[string]interface{}               `json:"blocks"`
+	Nodes        map[string]*ClusterStateNode         `json:"nodes"`
+	Metadata     *ClusterStateMetadata                `json:"metadata"`
+	RoutingTable map[string]*ClusterStateRoutingTable `json:"routing_table"`
+	RoutingNodes *ClusterStateRoutingNode             `json:"routing_nodes"`
+	Allocations  []interface{}                        `json:"allocations"`
+	Customs      map[string]interface{}               `json:"customs"`
+}
+
+type ClusterStateMetadata struct {
+	Templates    map[string]interface{} `json:"templates"`
+	Indices      map[string]interface{} `json:"indices"`
+	Repositories map[string]interface{} `json:"repositories"`
+}
+
+type ClusterStateNode struct {
+	State          string  `json:"state"`
+	Primary        bool    `json:"primary"`
+	Node           string  `json:"node"`
+	RelocatingNode *string `json:"relocating_node"`
+	Shard          int     `json:"shard"`
+	Index          string  `json:"index"`
+}
+
+type ClusterStateRoutingTable struct {
+	Indices map[string]interface{} `json:"indices"`
+}
+
+type ClusterStateRoutingNode struct {
+	Unassigned []interface{}          `json:"unassigned"`
+	Nodes      map[string]interface{} `json:"nodes"`
+}

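A sketch of reading cluster state restricted to the `metadata` metric, which keeps the response small; the node URL is an assumption:

```go
package main

import (
	"fmt"
	"log"

	elastic "github.com/olivere/elastic" // import path assumed; this repo vendors it under sword_base/olivere/elastic.v1
)

func main() {
	client, err := elastic.NewClient(elastic.SetURL("http://127.0.0.1:9200"))
	if err != nil {
		log.Fatal(err)
	}

	// Only fetch the metadata section of the cluster state for all indices.
	state, err := client.ClusterState().Metric("metadata").Index("_all").Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("cluster %s, state version %d\n", state.ClusterName, state.Version)
	if state.Metadata != nil {
		for name := range state.Metadata.Indices {
			fmt.Println("index:", name)
		}
	}
}
```
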
+ 92 - 0
sword_base/olivere/elastic.v1/cluster_state_test.go

@@ -0,0 +1,92 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"net/url"
+	"testing"
+)
+
+func TestClusterState(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	// Get cluster state
+	res, err := client.ClusterState().Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if res == nil {
+		t.Fatalf("expected res to be != nil; got: %v", res)
+	}
+	if res.ClusterName == "" {
+		t.Fatalf("expected a cluster name; got: %q", res.ClusterName)
+	}
+}
+
+func TestClusterStateURLs(t *testing.T) {
+	tests := []struct {
+		Service        *ClusterStateService
+		ExpectedPath   string
+		ExpectedParams url.Values
+	}{
+		{
+			Service: &ClusterStateService{
+				indices: []string{},
+				metrics: []string{},
+			},
+			ExpectedPath: "/_cluster/state/_all/_all",
+		},
+		{
+			Service: &ClusterStateService{
+				indices: []string{"twitter"},
+				metrics: []string{},
+			},
+			ExpectedPath: "/_cluster/state/_all/twitter",
+		},
+		{
+			Service: &ClusterStateService{
+				indices: []string{"twitter", "gplus"},
+				metrics: []string{},
+			},
+			ExpectedPath: "/_cluster/state/_all/twitter%2Cgplus",
+		},
+		{
+			Service: &ClusterStateService{
+				indices: []string{},
+				metrics: []string{"nodes"},
+			},
+			ExpectedPath: "/_cluster/state/nodes/_all",
+		},
+		{
+			Service: &ClusterStateService{
+				indices: []string{"twitter"},
+				metrics: []string{"nodes"},
+			},
+			ExpectedPath: "/_cluster/state/nodes/twitter",
+		},
+		{
+			Service: &ClusterStateService{
+				indices:       []string{"twitter"},
+				metrics:       []string{"nodes"},
+				masterTimeout: "1s",
+			},
+			ExpectedPath:   "/_cluster/state/nodes/twitter",
+			ExpectedParams: url.Values{"master_timeout": []string{"1s"}},
+		},
+	}
+
+	for _, test := range tests {
+		gotPath, gotParams, err := test.Service.buildURL()
+		if err != nil {
+			t.Fatalf("expected no error; got: %v", err)
+		}
+		if gotPath != test.ExpectedPath {
+			t.Errorf("expected URL path = %q; got: %q", test.ExpectedPath, gotPath)
+		}
+		if gotParams.Encode() != test.ExpectedParams.Encode() {
+			t.Errorf("expected URL params = %v; got: %v", test.ExpectedParams, gotParams)
+		}
+	}
+}

+ 90 - 0
sword_base/olivere/elastic.v1/connection.go

@@ -0,0 +1,90 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"fmt"
+	"sync"
+	"time"
+)
+
+// conn represents a single connection to a node in a cluster.
+type conn struct {
+	sync.RWMutex
+	nodeID    string // node ID
+	url       string
+	failures  int
+	dead      bool
+	deadSince *time.Time
+}
+
+// newConn creates a new connection to the given URL.
+func newConn(nodeID, url string) *conn {
+	c := &conn{
+		nodeID: nodeID,
+		url:    url,
+	}
+	return c
+}
+
+// String returns a representation of the connection status.
+func (c *conn) String() string {
+	c.RLock()
+	defer c.RUnlock()
+	return fmt.Sprintf("%s [dead=%v,failures=%d,deadSince=%v]", c.url, c.dead, c.failures, c.deadSince)
+}
+
+// NodeID returns the ID of the node of this connection.
+func (c *conn) NodeID() string {
+	c.RLock()
+	defer c.RUnlock()
+	return c.nodeID
+}
+
+// URL returns the URL of this connection.
+func (c *conn) URL() string {
+	c.RLock()
+	defer c.RUnlock()
+	return c.url
+}
+
+// IsDead returns true if this connection is marked as dead, i.e. a previous
+// request to the URL has been unsuccessful.
+func (c *conn) IsDead() bool {
+	c.RLock()
+	defer c.RUnlock()
+	return c.dead
+}
+
+// MarkAsDead marks this connection as dead, increments the failures
+// counter and stores the current time in dead since.
+func (c *conn) MarkAsDead() {
+	c.Lock()
+	c.dead = true
+	if c.deadSince == nil {
+		utcNow := time.Now().UTC()
+		c.deadSince = &utcNow
+	}
+	c.failures += 1
+	c.Unlock()
+}
+
+// MarkAsAlive marks this connection as eligible to be returned from the
+// pool of connections by the selector.
+func (c *conn) MarkAsAlive() {
+	c.Lock()
+	c.dead = false
+	c.Unlock()
+}
+
+// MarkAsHealthy marks this connection as healthy, i.e. a request has been
+// successfully performed with it.
+func (c *conn) MarkAsHealthy() {
+	c.Lock()
+	c.dead = false
+	c.deadSince = nil
+	c.failures = 0
+	c.Unlock()
+}

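`conn` is unexported, so it can only be exercised from inside the package. A sketch of the dead/alive lifecycle as it could appear in a package-level test; the node ID and URL are arbitrary:

```go
package elastic

import "testing"

func TestConnLifecycleSketch(t *testing.T) {
	c := newConn("node-1", "http://127.0.0.1:9200")

	// A failed request marks the connection dead and records when it died.
	c.MarkAsDead()
	if !c.IsDead() {
		t.Fatal("expected connection to be marked dead")
	}

	// MarkAsAlive puts the connection back into rotation but keeps its failure
	// history; MarkAsHealthy additionally resets failures and deadSince.
	c.MarkAsAlive()
	c.MarkAsHealthy()
	if c.IsDead() {
		t.Fatal("expected connection to be alive again")
	}
}
```
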
+ 152 - 0
sword_base/olivere/elastic.v1/count.go

@@ -0,0 +1,152 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+	"strings"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
+// CountService is a convenient service for determining the
+// number of documents in an index. Use SearchService with
+// a SearchType of count for counting with queries etc.
+type CountService struct {
+	client  *Client
+	indices []string
+	types   []string
+	query   Query
+	pretty  bool
+}
+
+// CountResult is the result returned from using the Count API
+// (http://www.elasticsearch.org/guide/reference/api/count/)
+type CountResult struct {
+	Count  int64      `json:"count"`
+	Shards shardsInfo `json:"_shards,omitempty"`
+}
+
+func NewCountService(client *Client) *CountService {
+	builder := &CountService{
+		client: client,
+	}
+	return builder
+}
+
+func (s *CountService) Index(index string) *CountService {
+	if s.indices == nil {
+		s.indices = make([]string, 0)
+	}
+	s.indices = append(s.indices, index)
+	return s
+}
+
+func (s *CountService) Indices(indices ...string) *CountService {
+	if s.indices == nil {
+		s.indices = make([]string, 0)
+	}
+	s.indices = append(s.indices, indices...)
+	return s
+}
+
+func (s *CountService) Type(typ string) *CountService {
+	if s.types == nil {
+		s.types = make([]string, 0)
+	}
+	s.types = append(s.types, typ)
+	return s
+}
+
+func (s *CountService) Types(types ...string) *CountService {
+	if s.types == nil {
+		s.types = make([]string, 0)
+	}
+	s.types = append(s.types, types...)
+	return s
+}
+
+func (s *CountService) Query(query Query) *CountService {
+	s.query = query
+	return s
+}
+
+func (s *CountService) Pretty(pretty bool) *CountService {
+	s.pretty = pretty
+	return s
+}
+
+func (s *CountService) Do() (int64, error) {
+	var err error
+
+	// Build url
+	path := "/"
+
+	// Indices part
+	indexPart := make([]string, 0)
+	for _, index := range s.indices {
+		index, err = uritemplates.Expand("{index}", map[string]string{
+			"index": index,
+		})
+		if err != nil {
+			return 0, err
+		}
+		indexPart = append(indexPart, index)
+	}
+	if len(indexPart) > 0 {
+		path += strings.Join(indexPart, ",")
+	}
+
+	// Types part
+	typesPart := make([]string, 0)
+	for _, typ := range s.types {
+		typ, err = uritemplates.Expand("{type}", map[string]string{
+			"type": typ,
+		})
+		if err != nil {
+			return 0, err
+		}
+		typesPart = append(typesPart, typ)
+	}
+	if len(typesPart) > 0 {
+		path += "/" + strings.Join(typesPart, ",")
+	}
+
+	// Search
+	path += "/_count"
+
+	// Parameters
+	params := make(url.Values)
+	if s.pretty {
+		params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+	}
+
+	// Set body if there is a query specified
+	var body interface{}
+	if s.query != nil {
+		query := make(map[string]interface{})
+		query["query"] = s.query.Source()
+		body = query
+	}
+
+	// Get response
+	res, err := s.client.PerformRequest("POST", path, params, body)
+	if err != nil {
+		return 0, err
+	}
+
+	// Return result
+	ret := new(CountResult)
+	if err := json.Unmarshal(res.Body, ret); err != nil {
+		return 0, err
+	}
+	return ret.Count, nil
+}

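A usage sketch for `CountService`, counting with and without a term query; index, type, and user values are placeholders:

```go
package main

import (
	"fmt"
	"log"

	elastic "github.com/olivere/elastic" // import path assumed; this repo vendors it under sword_base/olivere/elastic.v1
)

func main() {
	client, err := elastic.NewClient(elastic.SetURL("http://127.0.0.1:9200"))
	if err != nil {
		log.Fatal(err)
	}

	// Count all documents of type tweet in the twitter index.
	total, err := client.Count("twitter").Type("tweet").Do()
	if err != nil {
		log.Fatal(err)
	}

	// Count only the tweets written by a specific user.
	q := elastic.NewTermQuery("user", "olivere")
	byUser, err := client.Count("twitter").Type("tweet").Query(q).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d tweets total, %d by olivere\n", total, byUser)
}
```
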
+ 83 - 0
sword_base/olivere/elastic.v1/count_test.go

@@ -0,0 +1,83 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import "testing"
+
+func TestCount(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+	tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+	tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
+
+	// Add all documents
+	_, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Flush().Index(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Count documents
+	count, err := client.Count(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if count != 3 {
+		t.Errorf("expected Count = %d; got %d", 3, count)
+	}
+
+	// Count documents
+	count, err = client.Count(testIndexName).Type("tweet").Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if count != 3 {
+		t.Errorf("expected Count = %d; got %d", 3, count)
+	}
+
+	// Count documents
+	count, err = client.Count(testIndexName).Type("gezwitscher").Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if count != 0 {
+		t.Errorf("expected Count = %d; got %d", 0, count)
+	}
+
+	// Count with query
+	query := NewTermQuery("user", "olivere")
+	count, err = client.Count(testIndexName).Query(query).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if count != 2 {
+		t.Errorf("expected Count = %d; got %d", 2, count)
+	}
+
+	// Count with query and type
+	query = NewTermQuery("user", "olivere")
+	count, err = client.Count(testIndexName).Type("tweet").Query(query).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if count != 2 {
+		t.Errorf("expected Count = %d; got %d", 2, count)
+	}
+}

+ 75 - 0
sword_base/olivere/elastic.v1/create_index.go

@@ -0,0 +1,75 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
+type CreateIndexService struct {
+	client *Client
+	index  string
+	body   string
+	pretty bool
+}
+
+func NewCreateIndexService(client *Client) *CreateIndexService {
+	builder := &CreateIndexService{
+		client: client,
+	}
+	return builder
+}
+
+func (b *CreateIndexService) Index(index string) *CreateIndexService {
+	b.index = index
+	return b
+}
+
+func (b *CreateIndexService) Body(body string) *CreateIndexService {
+	b.body = body
+	return b
+}
+
+func (b *CreateIndexService) Pretty(pretty bool) *CreateIndexService {
+	b.pretty = pretty
+	return b
+}
+
+func (b *CreateIndexService) Do() (*CreateIndexResult, error) {
+	// Build url
+	path, err := uritemplates.Expand("/{index}/", map[string]string{
+		"index": b.index,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	params := make(url.Values)
+	if b.pretty {
+		params.Set("pretty", fmt.Sprintf("%v", b.pretty))
+	}
+
+	// Get response
+	res, err := b.client.PerformRequest("PUT", path, params, b.body)
+	if err != nil {
+		return nil, err
+	}
+
+	ret := new(CreateIndexResult)
+	if err := json.Unmarshal(res.Body, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// -- Result of a create index request.
+
+type CreateIndexResult struct {
+	Acknowledged bool `json:"acknowledged"`
+}

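A sketch of creating an index with an explicit settings body; the settings JSON is only an example, any valid create-index body can be passed to `Body`:

```go
package main

import (
	"fmt"
	"log"

	elastic "github.com/olivere/elastic" // import path assumed; this repo vendors it under sword_base/olivere/elastic.v1
)

func main() {
	client, err := elastic.NewClient(elastic.SetURL("http://127.0.0.1:9200"))
	if err != nil {
		log.Fatal(err)
	}

	// Index settings are passed as a raw JSON string via Body.
	const body = `{"settings":{"number_of_shards":1,"number_of_replicas":0}}`
	res, err := client.CreateIndex("twitter").Body(body).Do()
	if err != nil {
		log.Fatal(err)
	}
	if !res.Acknowledged {
		log.Fatal("create index was not acknowledged")
	}
	fmt.Println("index created")
}
```
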
+ 26 - 0
sword_base/olivere/elastic.v1/decoder.go

@@ -0,0 +1,26 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+)
+
+// Decoder is used to decode responses from Elasticsearch.
+// Users of elastic can implement their own decoder for advanced purposes
+// and set it per Client (see SetDecoder). If none is specified,
+// DefaultDecoder is used.
+type Decoder interface {
+	Decode(data []byte, v interface{}) error
+}
+
+// DefaultDecoder uses json.Unmarshal from the Go standard library
+// to decode JSON data.
+type DefaultDecoder struct{}
+
+// Decode decodes with json.Unmarshal from the Go standard library.
+func (u *DefaultDecoder) Decode(data []byte, v interface{}) error {
+	return json.Unmarshal(data, v)
+}

+ 49 - 0
sword_base/olivere/elastic.v1/decoder_test.go

@@ -0,0 +1,49 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"bytes"
+	"encoding/json"
+	"sync/atomic"
+	"testing"
+)
+
+type decoder struct {
+	dec json.Decoder
+
+	N int64
+}
+
+func (d *decoder) Decode(data []byte, v interface{}) error {
+	atomic.AddInt64(&d.N, 1)
+	dec := json.NewDecoder(bytes.NewReader(data))
+	dec.UseNumber()
+	return dec.Decode(v)
+}
+
+func TestDecoder(t *testing.T) {
+	dec := &decoder{}
+	client := setupTestClientAndCreateIndex(t, SetDecoder(dec), SetMaxRetries(0))
+
+	tweet := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+
+	// Add a document
+	indexResult, err := client.Index().
+		Index(testIndexName).
+		Type("tweet").
+		Id("1").
+		BodyJson(&tweet).
+		Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if indexResult == nil {
+		t.Errorf("expected result to be != nil; got: %v", indexResult)
+	}
+	if dec.N <= 0 {
+		t.Errorf("expected at least 1 call of decoder; got: %d", dec.N)
+	}
+}

+ 118 - 0
sword_base/olivere/elastic.v1/delete.go

@@ -0,0 +1,118 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
+type DeleteService struct {
+	client  *Client
+	index   string
+	_type   string
+	id      string
+	routing string
+	refresh *bool
+	version *int
+	pretty  bool
+}
+
+func NewDeleteService(client *Client) *DeleteService {
+	builder := &DeleteService{
+		client: client,
+	}
+	return builder
+}
+
+func (s *DeleteService) Index(index string) *DeleteService {
+	s.index = index
+	return s
+}
+
+func (s *DeleteService) Type(_type string) *DeleteService {
+	s._type = _type
+	return s
+}
+
+func (s *DeleteService) Id(id string) *DeleteService {
+	s.id = id
+	return s
+}
+
+func (s *DeleteService) Parent(parent string) *DeleteService {
+	if s.routing == "" {
+		s.routing = parent
+	}
+	return s
+}
+
+func (s *DeleteService) Refresh(refresh bool) *DeleteService {
+	s.refresh = &refresh
+	return s
+}
+
+func (s *DeleteService) Version(version int) *DeleteService {
+	s.version = &version
+	return s
+}
+
+func (s *DeleteService) Pretty(pretty bool) *DeleteService {
+	s.pretty = pretty
+	return s
+}
+
+func (s *DeleteService) Do() (*DeleteResult, error) {
+	// Build url
+	path, err := uritemplates.Expand("/{index}/{type}/{id}", map[string]string{
+		"index": s.index,
+		"type":  s._type,
+		"id":    s.id,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	// Parameters
+	params := make(url.Values)
+	if s.refresh != nil {
+		params.Set("refresh", fmt.Sprintf("%v", *s.refresh))
+	}
+	if s.version != nil {
+		params.Set("version", fmt.Sprintf("%d", *s.version))
+	}
+	if s.routing != "" {
+		params.Set("routing", s.routing)
+	}
+	if s.pretty {
+		params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+	}
+
+	// Get response
+	res, err := s.client.PerformRequest("DELETE", path, params, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return response
+	ret := new(DeleteResult)
+	if err := json.Unmarshal(res.Body, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// -- Result of a delete request.
+
+type DeleteResult struct {
+	Found   bool   `json:"found"`
+	Index   string `json:"_index"`
+	Type    string `json:"_type"`
+	Id      string `json:"_id"`
+	Version int64  `json:"_version"`
+}

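A sketch of deleting a single document by ID. It uses the exported constructor `NewDeleteService`, since that is what this file defines; index, type, and ID are placeholders:

```go
package main

import (
	"fmt"
	"log"

	elastic "github.com/olivere/elastic" // import path assumed; this repo vendors it under sword_base/olivere/elastic.v1
)

func main() {
	client, err := elastic.NewClient(elastic.SetURL("http://127.0.0.1:9200"))
	if err != nil {
		log.Fatal(err)
	}

	// Delete tweet 1 and refresh so the change is immediately visible.
	res, err := elastic.NewDeleteService(client).
		Index("twitter").
		Type("tweet").
		Id("1").
		Refresh(true).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("found=%v version=%d\n", res.Found, res.Version)
}
```
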
+ 292 - 0
sword_base/olivere/elastic.v1/delete_by_query.go

@@ -0,0 +1,292 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+	"strings"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
+// DeleteByQueryService deletes documents that match a query.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/docs-delete-by-query.html.
+type DeleteByQueryService struct {
+	client            *Client
+	indices           []string
+	types             []string
+	analyzer          string
+	consistency       string
+	defaultOper       string
+	df                string
+	ignoreUnavailable *bool
+	allowNoIndices    *bool
+	expandWildcards   string
+	replication       string
+	routing           string
+	timeout           string
+	pretty            bool
+	q                 string
+	query             Query
+}
+
+// NewDeleteByQueryService creates a new DeleteByQueryService.
+// You typically use the client's DeleteByQuery to get a reference to
+// the service.
+func NewDeleteByQueryService(client *Client) *DeleteByQueryService {
+	builder := &DeleteByQueryService{
+		client: client,
+	}
+	return builder
+}
+
+// Index limits the delete-by-query to a single index.
+// You can use _all to perform the operation on all indices.
+func (s *DeleteByQueryService) Index(index string) *DeleteByQueryService {
+	if s.indices == nil {
+		s.indices = make([]string, 0)
+	}
+	s.indices = append(s.indices, index)
+	return s
+}
+
+// Indices sets the indices on which to perform the delete operation.
+func (s *DeleteByQueryService) Indices(indices ...string) *DeleteByQueryService {
+	if s.indices == nil {
+		s.indices = make([]string, 0)
+	}
+	s.indices = append(s.indices, indices...)
+	return s
+}
+
+// Type limits the delete operation to the given type.
+func (s *DeleteByQueryService) Type(typ string) *DeleteByQueryService {
+	if s.types == nil {
+		s.types = make([]string, 0)
+	}
+	s.types = append(s.types, typ)
+	return s
+}
+
+// Types limits the delete operation to the given types.
+func (s *DeleteByQueryService) Types(types ...string) *DeleteByQueryService {
+	if s.types == nil {
+		s.types = make([]string, 0)
+	}
+	s.types = append(s.types, types...)
+	return s
+}
+
+// Analyzer sets the analyzer to use for the query string.
+func (s *DeleteByQueryService) Analyzer(analyzer string) *DeleteByQueryService {
+	s.analyzer = analyzer
+	return s
+}
+
+// Consistency represents the specific write consistency setting for the operation.
+// It can be one, quorum, or all.
+func (s *DeleteByQueryService) Consistency(consistency string) *DeleteByQueryService {
+	s.consistency = consistency
+	return s
+}
+
+// DefaultOperator for query string query (AND or OR).
+func (s *DeleteByQueryService) DefaultOperator(defaultOperator string) *DeleteByQueryService {
+	s.defaultOper = defaultOperator
+	return s
+}
+
+// DF is the field to use as default where no field prefix is given in the query string.
+func (s *DeleteByQueryService) DF(defaultField string) *DeleteByQueryService {
+	s.df = defaultField
+	return s
+}
+
+// DefaultField is the field to use as default where no field prefix is given in the query string.
+// It is an alias to the DF func.
+func (s *DeleteByQueryService) DefaultField(defaultField string) *DeleteByQueryService {
+	s.df = defaultField
+	return s
+}
+
+// IgnoreUnavailable indicates whether specified concrete indices should be
+// ignored when unavailable (missing or closed).
+func (s *DeleteByQueryService) IgnoreUnavailable(ignore bool) *DeleteByQueryService {
+	s.ignoreUnavailable = &ignore
+	return s
+}
+
+// AllowNoIndices indicates whether to ignore if a wildcard indices
+// expression resolves into no concrete indices (including the _all string
+// or when no indices have been specified).
+func (s *DeleteByQueryService) AllowNoIndices(allow bool) *DeleteByQueryService {
+	s.allowNoIndices = &allow
+	return s
+}
+
+// ExpandWildcards indicates whether to expand wildcard expression to
+// concrete indices that are open, closed or both. It can be "open" or "closed".
+func (s *DeleteByQueryService) ExpandWildcards(expand string) *DeleteByQueryService {
+	s.expandWildcards = expand
+	return s
+}
+
+// Replication sets a specific replication type (sync or async).
+func (s *DeleteByQueryService) Replication(replication string) *DeleteByQueryService {
+	s.replication = replication
+	return s
+}
+
+// Q specifies the query in Lucene query string syntax. You can also use
+// Query to programmatically specify the query.
+func (s *DeleteByQueryService) Q(query string) *DeleteByQueryService {
+	s.q = query
+	return s
+}
+
+// QueryString is an alias to Q. Notice that you can also use Query to
+// programmatically set the query.
+func (s *DeleteByQueryService) QueryString(query string) *DeleteByQueryService {
+	s.q = query
+	return s
+}
+
+// Routing sets a specific routing value.
+func (s *DeleteByQueryService) Routing(routing string) *DeleteByQueryService {
+	s.routing = routing
+	return s
+}
+
+// Timeout sets an explicit operation timeout, e.g. "1s" or "10000ms".
+func (s *DeleteByQueryService) Timeout(timeout string) *DeleteByQueryService {
+	s.timeout = timeout
+	return s
+}
+
+// Pretty indents the JSON output from Elasticsearch.
+func (s *DeleteByQueryService) Pretty(pretty bool) *DeleteByQueryService {
+	s.pretty = pretty
+	return s
+}
+
+// Query sets the query programmatically.
+func (s *DeleteByQueryService) Query(query Query) *DeleteByQueryService {
+	s.query = query
+	return s
+}
+
+// Do executes the delete-by-query operation.
+func (s *DeleteByQueryService) Do() (*DeleteByQueryResult, error) {
+	var err error
+
+	// Build url
+	path := "/"
+
+	// Indices part
+	indexPart := make([]string, 0)
+	for _, index := range s.indices {
+		index, err = uritemplates.Expand("{index}", map[string]string{
+			"index": index,
+		})
+		if err != nil {
+			return nil, err
+		}
+		indexPart = append(indexPart, index)
+	}
+	if len(indexPart) > 0 {
+		path += strings.Join(indexPart, ",")
+	}
+
+	// Types part
+	typesPart := make([]string, 0)
+	for _, typ := range s.types {
+		typ, err = uritemplates.Expand("{type}", map[string]string{
+			"type": typ,
+		})
+		if err != nil {
+			return nil, err
+		}
+		typesPart = append(typesPart, typ)
+	}
+	if len(typesPart) > 0 {
+		path += "/" + strings.Join(typesPart, ",")
+	}
+
+	// Search
+	path += "/_query"
+
+	// Parameters
+	params := make(url.Values)
+	if s.analyzer != "" {
+		params.Set("analyzer", s.analyzer)
+	}
+	if s.consistency != "" {
+		params.Set("consistency", s.consistency)
+	}
+	if s.defaultOper != "" {
+		params.Set("default_operator", s.defaultOper)
+	}
+	if s.df != "" {
+		params.Set("df", s.df)
+	}
+	if s.ignoreUnavailable != nil {
+		params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+	}
+	if s.allowNoIndices != nil {
+		params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+	}
+	if s.expandWildcards != "" {
+		params.Set("expand_wildcards", s.expandWildcards)
+	}
+	if s.replication != "" {
+		params.Set("replication", s.replication)
+	}
+	if s.routing != "" {
+		params.Set("routing", s.routing)
+	}
+	if s.timeout != "" {
+		params.Set("timeout", s.timeout)
+	}
+	if s.pretty {
+		params.Set("pretty", fmt.Sprintf("%v", s.pretty))
+	}
+	if s.q != "" {
+		params.Set("q", s.q)
+	}
+
+	// Set body if there is a query set
+	var body interface{}
+	if s.query != nil {
+		query := make(map[string]interface{})
+		query["query"] = s.query.Source()
+		body = query
+	}
+
+	// Get response
+	res, err := s.client.PerformRequest("DELETE", path, params, body)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return result
+	ret := new(DeleteByQueryResult)
+	if err := json.Unmarshal(res.Body, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// DeleteByQueryResult is the outcome of executing Do with DeleteByQueryService.
+type DeleteByQueryResult struct {
+	Indices map[string]IndexDeleteByQueryResult `json:"_indices"`
+}
+
+// IndexDeleteByQueryResult is the result of a delete-by-query for a specific
+// index.
+type IndexDeleteByQueryResult struct {
+	Shards shardsInfo `json:"_shards"`
+}

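A sketch of delete-by-query with a term query, mirroring the test that follows; the per-index shard counts come from the `_indices` section of the response:

```go
package main

import (
	"fmt"
	"log"

	elastic "github.com/olivere/elastic" // import path assumed; this repo vendors it under sword_base/olivere/elastic.v1
)

func main() {
	client, err := elastic.NewClient(elastic.SetURL("http://127.0.0.1:9200"))
	if err != nil {
		log.Fatal(err)
	}

	// Delete every tweet written by sandrae.
	q := elastic.NewTermQuery("user", "sandrae")
	res, err := client.DeleteByQuery().
		Index("twitter").
		Type("tweet").
		Query(q).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	for index, r := range res.Indices {
		fmt.Printf("%s: %d failed shards\n", index, r.Shards.Failed)
	}
}
```
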
+ 76 - 0
sword_base/olivere/elastic.v1/delete_by_query_test.go

@@ -0,0 +1,76 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"testing"
+)
+
+func TestDeleteByQuery(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+	tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+	tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
+
+	// Add all documents
+	_, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Flush().Index(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Count documents
+	count, err := client.Count(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if count != 3 {
+		t.Fatalf("expected count = %d; got: %d", 3, count)
+	}
+
+	// Delete all documents by sandrae
+	q := NewTermQuery("user", "sandrae")
+	res, err := client.DeleteByQuery().Index(testIndexName).Type("tweet").Query(q).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if res == nil {
+		t.Fatalf("expected response != nil; got: %v", res)
+	}
+	idx, found := res.Indices[testIndexName]
+	if !found {
+		t.Errorf("expected Found = true; got: %v", found)
+	}
+	if idx.Shards.Failed > 0 {
+		t.Errorf("expected no failed shards; got: %d", idx.Shards.Failed)
+	}
+
+	_, err = client.Flush().Index(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	count, err = client.Count(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if count != 2 {
+		t.Fatalf("expected Count = %d; got: %d", 2, count)
+	}
+}

+ 57 - 0
sword_base/olivere/elastic.v1/delete_index.go

@@ -0,0 +1,57 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
+type DeleteIndexService struct {
+	client *Client
+	index  string
+}
+
+func NewDeleteIndexService(client *Client) *DeleteIndexService {
+	builder := &DeleteIndexService{
+		client: client,
+	}
+	return builder
+}
+
+func (b *DeleteIndexService) Index(index string) *DeleteIndexService {
+	b.index = index
+	return b
+}
+
+func (b *DeleteIndexService) Do() (*DeleteIndexResult, error) {
+	// Build url
+	path, err := uritemplates.Expand("/{index}/", map[string]string{
+		"index": b.index,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	// Get response
+	res, err := b.client.PerformRequest("DELETE", path, nil, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return result
+	ret := new(DeleteIndexResult)
+	if err := json.Unmarshal(res.Body, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// -- Result of a delete index request.
+
+type DeleteIndexResult struct {
+	Acknowledged bool `json:"acknowledged"`
+}

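A sketch of deleting an index, guarded by `IndexExists` so the call does not fail when the index is missing; the index name is a placeholder:

```go
package main

import (
	"fmt"
	"log"

	elastic "github.com/olivere/elastic" // import path assumed; this repo vendors it under sword_base/olivere/elastic.v1
)

func main() {
	client, err := elastic.NewClient(elastic.SetURL("http://127.0.0.1:9200"))
	if err != nil {
		log.Fatal(err)
	}

	exists, err := client.IndexExists("twitter").Do()
	if err != nil {
		log.Fatal(err)
	}
	if !exists {
		fmt.Println("nothing to delete")
		return
	}
	res, err := client.DeleteIndex("twitter").Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("acknowledged:", res.Acknowledged)
}
```
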
+ 136 - 0
sword_base/olivere/elastic.v1/delete_mapping.go

@@ -0,0 +1,136 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"log"
+	"net/url"
+	"strings"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
+var (
+	_ = fmt.Print
+	_ = log.Print
+	_ = strings.Index
+	_ = uritemplates.Expand
+	_ = url.Parse
+)
+
+// DeleteMappingService deletes a mapping along with its data.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-delete-mapping.html.
+type DeleteMappingService struct {
+	client        *Client
+	pretty        bool
+	index         []string
+	typ           []string
+	masterTimeout string
+}
+
+// NewDeleteMappingService creates a new DeleteMappingService.
+func NewDeleteMappingService(client *Client) *DeleteMappingService {
+	return &DeleteMappingService{
+		client: client,
+		index:  make([]string, 0),
+		typ:    make([]string, 0),
+	}
+}
+
+// Index is a list of index names (supports wildcards). Use `_all` for all indices.
+func (s *DeleteMappingService) Index(index ...string) *DeleteMappingService {
+	s.index = append(s.index, index...)
+	return s
+}
+
+// Type is a list of document types to delete (supports wildcards).
+// Use `_all` to delete all document types in the specified indices.
+func (s *DeleteMappingService) Type(typ ...string) *DeleteMappingService {
+	s.typ = append(s.typ, typ...)
+	return s
+}
+
+// MasterTimeout specifies the timeout for connecting to master.
+func (s *DeleteMappingService) MasterTimeout(masterTimeout string) *DeleteMappingService {
+	s.masterTimeout = masterTimeout
+	return s
+}
+
+// Pretty indicates whether the JSON response should be indented and human readable.
+func (s *DeleteMappingService) Pretty(pretty bool) *DeleteMappingService {
+	s.pretty = pretty
+	return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *DeleteMappingService) buildURL() (string, url.Values, error) {
+	// Build URL
+	path, err := uritemplates.Expand("/{index}/_mapping/{type}", map[string]string{
+		"index": strings.Join(s.index, ","),
+		"type":  strings.Join(s.typ, ","),
+	})
+	if err != nil {
+		return "", url.Values{}, err
+	}
+
+	// Add query string parameters
+	params := url.Values{}
+	if s.pretty {
+		params.Set("pretty", "1")
+	}
+	if s.masterTimeout != "" {
+		params.Set("master_timeout", s.masterTimeout)
+	}
+	return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *DeleteMappingService) Validate() error {
+	var invalid []string
+	if len(s.index) == 0 {
+		invalid = append(invalid, "Index")
+	}
+	if len(s.typ) == 0 {
+		invalid = append(invalid, "Type")
+	}
+	if len(invalid) > 0 {
+		return fmt.Errorf("missing required fields: %v", invalid)
+	}
+	return nil
+}
+
+// Do executes the operation.
+func (s *DeleteMappingService) Do() (*DeleteMappingResponse, error) {
+	// Check pre-conditions
+	if err := s.Validate(); err != nil {
+		return nil, err
+	}
+
+	// Get URL for request
+	path, params, err := s.buildURL()
+	if err != nil {
+		return nil, err
+	}
+
+	// Get HTTP response
+	res, err := s.client.PerformRequest("DELETE", path, params, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return operation response
+	ret := new(DeleteMappingResponse)
+	if err := json.Unmarshal(res.Body, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// DeleteMappingResponse is the response of DeleteMappingService.Do.
+type DeleteMappingResponse struct {
+	Acknowledged bool `json:"acknowledged"`
+}

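A sketch of dropping a mapping (and the documents of that type) from an index; index and type names are placeholders:

```go
package main

import (
	"fmt"
	"log"

	elastic "github.com/olivere/elastic" // import path assumed; this repo vendors it under sword_base/olivere/elastic.v1
)

func main() {
	client, err := elastic.NewClient(elastic.SetURL("http://127.0.0.1:9200"))
	if err != nil {
		log.Fatal(err)
	}

	// Remove the tweet mapping (and its documents) from the twitter index.
	res, err := client.DeleteMapping().
		Index("twitter").
		Type("tweet").
		Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("acknowledged:", res.Acknowledged)
}
```
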
+ 40 - 0
sword_base/olivere/elastic.v1/delete_mapping_test.go

@@ -0,0 +1,40 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"testing"
+)
+
+func TestDeleteMappingURL(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	tests := []struct {
+		Indices  []string
+		Types    []string
+		Expected string
+	}{
+		{
+			[]string{"twitter"},
+			[]string{"tweet"},
+			"/twitter/_mapping/tweet",
+		},
+		{
+			[]string{"store-1", "store-2"},
+			[]string{"tweet", "user"},
+			"/store-1%2Cstore-2/_mapping/tweet%2Cuser",
+		},
+	}
+
+	for _, test := range tests {
+		path, _, err := client.DeleteMapping().Index(test.Indices...).Type(test.Types...).buildURL()
+		if err != nil {
+			t.Fatal(err)
+		}
+		if path != test.Expected {
+			t.Errorf("expected %q; got: %q", test.Expected, path)
+		}
+	}
+}

+ 118 - 0
sword_base/olivere/elastic.v1/delete_template.go

@@ -0,0 +1,118 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
+// DeleteTemplateService deletes a search template. More information can
+// be found at http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/search-template.html.
+type DeleteTemplateService struct {
+	client      *Client
+	pretty      bool
+	id          string
+	version     *int
+	versionType string
+}
+
+// NewDeleteTemplateService creates a new DeleteTemplateService.
+func NewDeleteTemplateService(client *Client) *DeleteTemplateService {
+	return &DeleteTemplateService{
+		client: client,
+	}
+}
+
+// Id is the template ID.
+func (s *DeleteTemplateService) Id(id string) *DeleteTemplateService {
+	s.id = id
+	return s
+}
+
+// Version sets an explicit version number for concurrency control.
+func (s *DeleteTemplateService) Version(version int) *DeleteTemplateService {
+	s.version = &version
+	return s
+}
+
+// VersionType specifies a version type.
+func (s *DeleteTemplateService) VersionType(versionType string) *DeleteTemplateService {
+	s.versionType = versionType
+	return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *DeleteTemplateService) buildURL() (string, url.Values, error) {
+	// Build URL
+	path, err := uritemplates.Expand("/_search/template/{id}", map[string]string{
+		"id": s.id,
+	})
+	if err != nil {
+		return "", url.Values{}, err
+	}
+
+	// Add query string parameters
+	params := url.Values{}
+	if s.version != nil {
+		params.Set("version", fmt.Sprintf("%d", *s.version))
+	}
+	if s.versionType != "" {
+		params.Set("version_type", s.versionType)
+	}
+
+	return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *DeleteTemplateService) Validate() error {
+	var invalid []string
+	if s.id == "" {
+		invalid = append(invalid, "Id")
+	}
+	if len(invalid) > 0 {
+		return fmt.Errorf("missing required fields: %v", invalid)
+	}
+	return nil
+}
+
+// Do executes the operation.
+func (s *DeleteTemplateService) Do() (*DeleteTemplateResponse, error) {
+	// Check pre-conditions
+	if err := s.Validate(); err != nil {
+		return nil, err
+	}
+
+	// Get URL for request
+	path, params, err := s.buildURL()
+	if err != nil {
+		return nil, err
+	}
+
+	// Get HTTP response
+	res, err := s.client.PerformRequest("DELETE", path, params, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return operation response
+	ret := new(DeleteTemplateResponse)
+	if err := json.Unmarshal(res.Body, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// DeleteTemplateResponse is the response of DeleteTemplateService.Do.
+type DeleteTemplateResponse struct {
+	Found   bool   `json:"found"`
+	Index   string `json:"_index"`
+	Type    string `json:"_type"`
+	Id      string `json:"_id"`
+	Version int    `json:"_version"`
+}
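
A hedged sketch of DeleteTemplateService using Version for optimistic concurrency control, assuming a connected client and a previously stored template; the id and version number are illustrative:

	resp, err := client.DeleteTemplate().
		Id("my-search-template").
		Version(2). // delete only if the stored version is still 2
		Do()
	if err != nil {
		// Handle error (for example, a version conflict)
	}
	if resp != nil && resp.Found {
		// Template deleted
	}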

+ 83 - 0
sword_base/olivere/elastic.v1/delete_test.go

@@ -0,0 +1,83 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"testing"
+)
+
+func TestDelete(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+	tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+	tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
+
+	// Add all documents
+	_, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Flush().Index(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Count documents
+	count, err := client.Count(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if count != 3 {
+		t.Errorf("expected Count = %d; got %d", 3, count)
+	}
+
+	// Delete document 1
+	res, err := client.Delete().Index(testIndexName).Type("tweet").Id("1").Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if res.Found != true {
+		t.Errorf("expected Found = true; got %v", res.Found)
+	}
+	_, err = client.Flush().Index(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	count, err = client.Count(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if count != 2 {
+		t.Errorf("expected Count = %d; got %d", 2, count)
+	}
+
+	// Delete non-existent document 99
+	res, err = client.Delete().Index(testIndexName).Type("tweet").Id("99").Refresh(true).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if res.Found != false {
+		t.Errorf("expected Found = false; got %v", res.Found)
+	}
+	count, err = client.Count(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if count != 2 {
+		t.Errorf("expected Count = %d; got %d", 2, count)
+	}
+}

+ 51 - 0
sword_base/olivere/elastic.v1/doc.go

@@ -0,0 +1,51 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+/*
+Package elastic provides an interface to the Elasticsearch server
+(http://www.elasticsearch.org/).
+
+The first thing you do is to create a Client. If you have Elasticsearch
+installed and running with its default settings
+(i.e. available at http://127.0.0.1:9200), all you need to do is:
+
+	client, err := elastic.NewClient()
+	if err != nil {
+		// Handle error
+	}
+
+If your Elasticsearch server is running on a different IP and/or port,
+just provide a URL to NewClient:
+
+  // Create a client and connect to http://192.168.2.10:9201
+  client, err := elastic.NewClient(elastic.SetURL("http://192.168.2.10:9201"))
+  if err != nil {
+    // Handle error
+  }
+
+You can pass many more configuration parameters to NewClient. Review the
+documentation of NewClient for more information.
+
+If no Elasticsearch server is available, services will fail when creating
+a new request and will return ErrNoClient.
+
+A Client provides services. The services usually come with a variety of
+methods to prepare the query and a Do function to execute it against the
+Elasticsearch REST interface and return a response. Here is an example
+of the IndexExists service that checks if a given index already exists.
+
+	exists, err := client.IndexExists("twitter").Do()
+	if err != nil {
+		// Handle error
+	}
+	if !exists {
+		// Index does not exist yet.
+	}
+
+Look up the documentation for Client to get an idea of the services provided
+and what kinds of responses you get when executing the Do function of a service.
+Also see the wiki on Github for more details.
+
+*/
+package elastic

+ 48 - 0
sword_base/olivere/elastic.v1/errors.go

@@ -0,0 +1,48 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+)
+
+func checkResponse(res *http.Response) error {
+	// 200-299 and 404 are valid status codes
+	if (res.StatusCode >= 200 && res.StatusCode <= 299) || res.StatusCode == http.StatusNotFound {
+		return nil
+	}
+	if res.Body == nil {
+		return fmt.Errorf("elastic: Error %d (%s)", res.StatusCode, http.StatusText(res.StatusCode))
+	}
+	slurp, err := ioutil.ReadAll(res.Body)
+	if err != nil {
+		return fmt.Errorf("elastic: Error %d (%s) when reading body: %v", res.StatusCode, http.StatusText(res.StatusCode), err)
+	}
+	errReply := new(Error)
+	err = json.Unmarshal(slurp, errReply)
+	if err == nil && errReply != nil {
+		if errReply.Status == 0 {
+			errReply.Status = res.StatusCode
+		}
+		return errReply
+	}
+	return nil
+}
+
+type Error struct {
+	Status  int    `json:"status"`
+	Message string `json:"error"`
+}
+
+func (e *Error) Error() string {
+	if e.Message != "" {
+		return fmt.Sprintf("elastic: Error %d (%s): %s", e.Status, http.StatusText(e.Status), e.Message)
+	} else {
+		return fmt.Sprintf("elastic: Error %d (%s)", e.Status, http.StatusText(e.Status))
+	}
+}

+ 45 - 0
sword_base/olivere/elastic.v1/errors_test.go

@@ -0,0 +1,45 @@
+package elastic
+
+import (
+	"bufio"
+	"fmt"
+	"net/http"
+	"strings"
+	"testing"
+)
+
+func TestResponseError(t *testing.T) {
+	message := "Something went seriously wrong."
+	raw := "HTTP/1.1 500 Internal Server Error\r\n" +
+		"\r\n" +
+		`{"status":500,"error":"` + message + `"}` + "\r\n"
+	r := bufio.NewReader(strings.NewReader(raw))
+
+	resp, err := http.ReadResponse(r, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = checkResponse(resp)
+	if err == nil {
+		t.Fatalf("expected error; got: %v", err)
+	}
+
+	// Check for correct error message
+	expected := fmt.Sprintf("elastic: Error %d (%s): %s", resp.StatusCode, http.StatusText(resp.StatusCode), message)
+	got := err.Error()
+	if got != expected {
+		t.Fatalf("expected %q; got: %q", expected, got)
+	}
+
+	// Check that error is of type *elastic.Error, which contains additional information
+	e, ok := err.(*Error)
+	if !ok {
+		t.Fatal("expected error to be of type *elastic.Error")
+	}
+	if e.Status != resp.StatusCode {
+		t.Fatalf("expected status code %d; got: %d", resp.StatusCode, e.Status)
+	}
+	if e.Message != message {
+		t.Fatalf("expected error message %q; got: %q", message, e.Message)
+	}
+}

+ 547 - 0
sword_base/olivere/elastic.v1/example_test.go

@@ -0,0 +1,547 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic_test
+
+import (
+	"encoding/json"
+	"fmt"
+	"log"
+	"os"
+	"reflect"
+	"time"
+
+	"github.com/olivere/elastic"
+)
+
+type Tweet struct {
+	User     string                `json:"user"`
+	Message  string                `json:"message"`
+	Retweets int                   `json:"retweets"`
+	Image    string                `json:"image,omitempty"`
+	Created  time.Time             `json:"created,omitempty"`
+	Tags     []string              `json:"tags,omitempty"`
+	Location string                `json:"location,omitempty"`
+	Suggest  *elastic.SuggestField `json:"suggest_field,omitempty"`
+}
+
+func Example() {
+	errorlog := log.New(os.Stdout, "APP ", log.LstdFlags)
+
+	// Obtain a client. You can provide your own HTTP client here.
+	client, err := elastic.NewClient(elastic.SetErrorLog(errorlog))
+	if err != nil {
+		// Handle error
+		panic(err)
+	}
+
+	// Trace request and response details like this
+	//client.SetTracer(log.New(os.Stdout, "", 0))
+
+	// Ping the Elasticsearch server to get e.g. the version number
+	info, code, err := client.Ping().Do()
+	if err != nil {
+		// Handle error
+		panic(err)
+	}
+	fmt.Printf("Elasticsearch returned with code %d and version %s", code, info.Version.Number)
+
+	// Getting the ES version number is quite common, so there's a shortcut
+	esversion, err := client.ElasticsearchVersion("http://127.0.0.1:9200")
+	if err != nil {
+		// Handle error
+		panic(err)
+	}
+	fmt.Printf("Elasticsearch version %s", esversion)
+
+	// Use the IndexExists service to check if a specified index exists.
+	exists, err := client.IndexExists("twitter").Do()
+	if err != nil {
+		// Handle error
+		panic(err)
+	}
+	if !exists {
+		// Create a new index.
+		createIndex, err := client.CreateIndex("twitter").Do()
+		if err != nil {
+			// Handle error
+			panic(err)
+		}
+		if !createIndex.Acknowledged {
+			// Not acknowledged
+		}
+	}
+
+	// Index a tweet (using JSON serialization)
+	tweet1 := Tweet{User: "olivere", Message: "Take Five", Retweets: 0}
+	put1, err := client.Index().
+		Index("twitter").
+		Type("tweet").
+		Id("1").
+		BodyJson(tweet1).
+		Do()
+	if err != nil {
+		// Handle error
+		panic(err)
+	}
+	fmt.Printf("Indexed tweet %s to index %s, type %s\n", put1.Id, put1.Index, put1.Type)
+
+	// Index a second tweet (by string)
+	tweet2 := `{"user" : "olivere", "message" : "It's a Raggy Waltz"}`
+	put2, err := client.Index().
+		Index("twitter").
+		Type("tweet").
+		Id("2").
+		BodyString(tweet2).
+		Do()
+	if err != nil {
+		// Handle error
+		panic(err)
+	}
+	fmt.Printf("Indexed tweet %s to index %s, type %s\n", put2.Id, put2.Index, put2.Type)
+
+	// Get tweet with specified ID
+	get1, err := client.Get().
+		Index("twitter").
+		Type("tweet").
+		Id("1").
+		Do()
+	if err != nil {
+		// Handle error
+		panic(err)
+	}
+	if get1.Found {
+		fmt.Printf("Got document %s in version %d from index %s, type %s\n", get1.Id, get1.Version, get1.Index, get1.Type)
+	}
+
+	// Flush to make sure the documents got written.
+	_, err = client.Flush().Index("twitter").Do()
+	if err != nil {
+		panic(err)
+	}
+
+	// Search with a term query
+	termQuery := elastic.NewTermQuery("user", "olivere")
+	searchResult, err := client.Search().
+		Index("twitter").   // search in index "twitter"
+		Query(&termQuery).  // specify the query
+		Sort("user", true). // sort by "user" field, ascending
+		From(0).Size(10).   // take documents 0-9
+		Pretty(true).       // pretty print request and response JSON
+		Do()                // execute
+	if err != nil {
+		// Handle error
+		panic(err)
+	}
+
+	// searchResult is of type SearchResult and returns hits, suggestions,
+	// and all kinds of other information from Elasticsearch.
+	fmt.Printf("Query took %d milliseconds\n", searchResult.TookInMillis)
+
+	// Each is a convenience function that iterates over hits in a search result.
+	// It makes sure you don't need to check for nil values in the response.
+	// However, it ignores errors in serialization. If you want full control
+	// over iterating the hits, see below.
+	var ttyp Tweet
+	for _, item := range searchResult.Each(reflect.TypeOf(ttyp)) {
+		t := item.(Tweet)
+		fmt.Printf("Tweet by %s: %s\n", t.User, t.Message)
+	}
+	// TotalHits is another convenience function that works even when something goes wrong.
+	fmt.Printf("Found a total of %d tweets\n", searchResult.TotalHits())
+
+	// Here's how you iterate through results with full control over each step.
+	if searchResult.Hits != nil {
+		fmt.Printf("Found a total of %d tweets\n", searchResult.Hits.TotalHits)
+
+		// Iterate through results
+		for _, hit := range searchResult.Hits.Hits {
+			// hit.Index contains the name of the index
+
+			// Deserialize hit.Source into a Tweet (could also be just a map[string]interface{}).
+			var t Tweet
+			err := json.Unmarshal(*hit.Source, &t)
+			if err != nil {
+				// Deserialization failed
+			}
+
+			// Work with tweet
+			fmt.Printf("Tweet by %s: %s\n", t.User, t.Message)
+		}
+	} else {
+		// No hits
+		fmt.Print("Found no tweets\n")
+	}
+
+	// Update a tweet by the update API of Elasticsearch.
+	// We just increment the number of retweets.
+	update, err := client.Update().Index("twitter").Type("tweet").Id("1").
+		Script("ctx._source.retweets += num").
+		ScriptParams(map[string]interface{}{"num": 1}).
+		Upsert(map[string]interface{}{"retweets": 0}).
+		Do()
+	if err != nil {
+		// Handle error
+		panic(err)
+	}
+	fmt.Printf("New version of tweet %q is now %d", update.Id, update.Version)
+
+	// ...
+
+	// Delete an index.
+	deleteIndex, err := client.DeleteIndex("twitter").Do()
+	if err != nil {
+		// Handle error
+		panic(err)
+	}
+	if !deleteIndex.Acknowledged {
+		// Not acknowledged
+	}
+}
+
+func ExampleClient_NewClient_default() {
+	// Obtain a client to the Elasticsearch instance on http://localhost:9200.
+	client, err := elastic.NewClient()
+	if err != nil {
+		// Handle error
+		fmt.Printf("connection failed: %v\n", err)
+	} else {
+		fmt.Println("connected")
+	}
+	_ = client
+	// Output:
+	// connected
+}
+
+func ExampleClient_NewClient_cluster() {
+	// Obtain a client for an Elasticsearch cluster of two nodes,
+	// running on 10.0.1.1 and 10.0.1.2.
+	client, err := elastic.NewClient(elastic.SetURL("http://10.0.1.1:9200", "http://10.0.1.2:9200"))
+	if err != nil {
+		// Handle error
+		panic(err)
+	}
+	_ = client
+}
+
+func ExampleClient_NewClient_manyOptions() {
+	// Obtain a client for an Elasticsearch cluster of two nodes,
+	// running on 10.0.1.1 and 10.0.1.2. Do not run the sniffer.
+	// Set the healthcheck interval to 10s. When requests fail,
+	// retry 5 times. Print error messages to os.Stderr and informational
+	// messages to os.Stdout.
+	client, err := elastic.NewClient(
+		elastic.SetURL("http://10.0.1.1:9200", "http://10.0.1.2:9200"),
+		elastic.SetSniff(false),
+		elastic.SetHealthcheckInterval(10*time.Second),
+		elastic.SetMaxRetries(5),
+		elastic.SetErrorLog(log.New(os.Stderr, "ELASTIC ", log.LstdFlags)),
+		elastic.SetInfoLog(log.New(os.Stdout, "", log.LstdFlags)))
+	if err != nil {
+		// Handle error
+		panic(err)
+	}
+	_ = client
+}
+
+func ExampleIndexExistsService() {
+	// Get a client to the local Elasticsearch instance.
+	client, err := elastic.NewClient()
+	if err != nil {
+		// Handle error
+		panic(err)
+	}
+	// Use the IndexExists service to check if the index "twitter" exists.
+	exists, err := client.IndexExists("twitter").Do()
+	if err != nil {
+		// Handle error
+		panic(err)
+	}
+	if exists {
+		// ...
+	}
+}
+
+func ExampleCreateIndexService() {
+	// Get a client to the local Elasticsearch instance.
+	client, err := elastic.NewClient()
+	if err != nil {
+		// Handle error
+		panic(err)
+	}
+	// Create a new index.
+	createIndex, err := client.CreateIndex("twitter").Do()
+	if err != nil {
+		// Handle error
+		panic(err)
+	}
+	if !createIndex.Acknowledged {
+		// Not acknowledged
+	}
+}
+
+func ExampleDeleteIndexService() {
+	// Get a client to the local Elasticsearch instance.
+	client, err := elastic.NewClient()
+	if err != nil {
+		// Handle error
+		panic(err)
+	}
+	// Delete an index.
+	deleteIndex, err := client.DeleteIndex("twitter").Do()
+	if err != nil {
+		// Handle error
+		panic(err)
+	}
+	if !deleteIndex.Acknowledged {
+		// Not acknowledged
+	}
+}
+
+func ExampleSearchService() {
+	// Get a client to the local Elasticsearch instance.
+	client, err := elastic.NewClient()
+	if err != nil {
+		// Handle error
+		panic(err)
+	}
+
+	// Search with a term query
+	termQuery := elastic.NewTermQuery("user", "olivere")
+	searchResult, err := client.Search().
+		Index("twitter").   // search in index "twitter"
+		Query(&termQuery).  // specify the query
+		Sort("user", true). // sort by "user" field, ascending
+		From(0).Size(10).   // take documents 0-9
+		Pretty(true).       // pretty print request and response JSON
+		Do()                // execute
+	if err != nil {
+		// Handle error
+		panic(err)
+	}
+
+	// searchResult is of type SearchResult and returns hits, suggestions,
+	// and all kinds of other information from Elasticsearch.
+	fmt.Printf("Query took %d milliseconds\n", searchResult.TookInMillis)
+
+	// Number of hits
+	if searchResult.Hits != nil {
+		fmt.Printf("Found a total of %d tweets\n", searchResult.Hits.TotalHits)
+
+		// Iterate through results
+		for _, hit := range searchResult.Hits.Hits {
+			// hit.Index contains the name of the index
+
+			// Deserialize hit.Source into a Tweet (could also be just a map[string]interface{}).
+			var t Tweet
+			err := json.Unmarshal(*hit.Source, &t)
+			if err != nil {
+				// Deserialization failed
+			}
+
+			// Work with tweet
+			fmt.Printf("Tweet by %s: %s\n", t.User, t.Message)
+		}
+	} else {
+		// No hits
+		fmt.Print("Found no tweets\n")
+	}
+}
+
+func ExampleAggregations() {
+	// Get a client to the local Elasticsearch instance.
+	client, err := elastic.NewClient()
+	if err != nil {
+		// Handle error
+		panic(err)
+	}
+
+	// Create an aggregation for users and a sub-aggregation for a date histogram of tweets (per year).
+	timeline := elastic.NewTermsAggregation().Field("user").Size(10).OrderByCountDesc()
+	histogram := elastic.NewDateHistogramAggregation().Field("created").Interval("year")
+	timeline = timeline.SubAggregation("history", histogram)
+
+	// Search with a term query
+	searchResult, err := client.Search().
+		Index("twitter").                  // search in index "twitter"
+		Query(elastic.NewMatchAllQuery()). // return all results, but ...
+		SearchType("count").               // ... do not return hits, just the count
+		Aggregation("timeline", timeline). // add our aggregation to the query
+		Pretty(true).                      // pretty print request and response JSON
+		Do()                               // execute
+	if err != nil {
+		// Handle error
+		panic(err)
+	}
+
+	// Access "timeline" aggregate in search result.
+	agg, found := searchResult.Aggregations.Terms("timeline")
+	if !found {
+		log.Fatalf("we should have a terms aggregation called %q", "timeline")
+	}
+	for _, userBucket := range agg.Buckets {
+		// Every bucket should have the user field as key.
+		user := userBucket.Key
+
+		// The sub-aggregation history should have the number of tweets per year.
+		histogram, found := userBucket.DateHistogram("history")
+		if found {
+			for _, year := range histogram.Buckets {
+				fmt.Printf("user %q has %d tweets in %q\n", user, year.DocCount, year.KeyAsString)
+			}
+		}
+	}
+}
+
+func ExampleSearchResult() {
+	client, err := elastic.NewClient()
+	if err != nil {
+		panic(err)
+	}
+
+	// Do a search
+	searchResult, err := client.Search().Index("twitter").Query(elastic.NewMatchAllQuery()).Do()
+	if err != nil {
+		panic(err)
+	}
+
+	// searchResult is of type SearchResult and returns hits, suggestions,
+	// and all kinds of other information from Elasticsearch.
+	fmt.Printf("Query took %d milliseconds\n", searchResult.TookInMillis)
+
+	// Each is a utility function that iterates over hits in a search result.
+	// It makes sure you don't need to check for nil values in the response.
+	// However, it ignores errors in serialization. If you want full control
+	// over iterating the hits, see below.
+	var ttyp Tweet
+	for _, item := range searchResult.Each(reflect.TypeOf(ttyp)) {
+		t := item.(Tweet)
+		fmt.Printf("Tweet by %s: %s\n", t.User, t.Message)
+	}
+	fmt.Printf("Found a total of %d tweets\n", searchResult.TotalHits())
+
+	// Here's how you iterate hits with full control.
+	if searchResult.Hits != nil {
+		fmt.Printf("Found a total of %d tweets\n", searchResult.Hits.TotalHits)
+
+		// Iterate through results
+		for _, hit := range searchResult.Hits.Hits {
+			// hit.Index contains the name of the index
+
+			// Deserialize hit.Source into a Tweet (could also be just a map[string]interface{}).
+			var t Tweet
+			err := json.Unmarshal(*hit.Source, &t)
+			if err != nil {
+				// Deserialization failed
+			}
+
+			// Work with tweet
+			fmt.Printf("Tweet by %s: %s\n", t.User, t.Message)
+		}
+	} else {
+		// No hits
+		fmt.Print("Found no tweets\n")
+	}
+}
+
+func ExamplePutTemplateService() {
+	client, err := elastic.NewClient()
+	if err != nil {
+		panic(err)
+	}
+
+	// Create search template
+	tmpl := `{"template":{"query":{"match":{"title":"{{query_string}}"}}}}`
+
+	// Create template
+	resp, err := client.PutTemplate().
+		Id("my-search-template"). // Name of the template
+		BodyString(tmpl).         // Search template itself
+		Do()                      // Execute
+	if err != nil {
+		panic(err)
+	}
+	if resp.Created {
+		fmt.Println("search template created")
+	}
+}
+
+func ExampleGetTemplateService() {
+	client, err := elastic.NewClient()
+	if err != nil {
+		panic(err)
+	}
+
+	// Get template stored under "my-search-template"
+	resp, err := client.GetTemplate().Id("my-search-template").Do()
+	if err != nil {
+		panic(err)
+	}
+	fmt.Printf("search template is: %q\n", resp.Template)
+}
+
+func ExampleDeleteTemplateService() {
+	client, err := elastic.NewClient()
+	if err != nil {
+		panic(err)
+	}
+
+	// Delete template
+	resp, err := client.DeleteTemplate().Id("my-search-template").Do()
+	if err != nil {
+		panic(err)
+	}
+	if resp != nil && resp.Found {
+		fmt.Println("template deleted")
+	}
+}
+
+func ExampleClusterHealthService() {
+	client, err := elastic.NewClient()
+	if err != nil {
+		panic(err)
+	}
+
+	// Get cluster health
+	res, err := client.ClusterHealth().Index("twitter").Do()
+	if err != nil {
+		panic(err)
+	}
+	if res == nil {
+		panic("expected cluster health response; got nil")
+	}
+	fmt.Printf("Cluster status is %q\n", res.Status)
+}
+
+func ExampleClusterHealthService_WaitForGreen() {
+	client, err := elastic.NewClient()
+	if err != nil {
+		panic(err)
+	}
+
+	// Wait for status green
+	res, err := client.ClusterHealth().WaitForStatus("green").Timeout("15s").Do()
+	if err != nil {
+		panic(err)
+	}
+	if res.TimedOut {
+		fmt.Printf("time out waiting for cluster status %q\n", "green")
+	} else {
+		fmt.Printf("cluster status is %q\n", res.Status)
+	}
+}
+
+func ExampleClusterStateService() {
+	client, err := elastic.NewClient()
+	if err != nil {
+		panic(err)
+	}
+
+	// Get cluster state
+	res, err := client.ClusterState().Metric("version").Do()
+	if err != nil {
+		panic(err)
+	}
+	fmt.Printf("Cluster %q has version %d", res.ClusterName, res.Version)
+}

+ 71 - 0
sword_base/olivere/elastic.v1/exists.go

@@ -0,0 +1,71 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"fmt"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
+type ExistsService struct {
+	client *Client
+	index  string
+	_type  string
+	id     string
+}
+
+func NewExistsService(client *Client) *ExistsService {
+	builder := &ExistsService{
+		client: client,
+	}
+	return builder
+}
+
+func (s *ExistsService) String() string {
+	return fmt.Sprintf("exists([%v][%v][%v])",
+		s.index,
+		s._type,
+		s.id)
+}
+
+func (s *ExistsService) Index(index string) *ExistsService {
+	s.index = index
+	return s
+}
+
+func (s *ExistsService) Type(_type string) *ExistsService {
+	s._type = _type
+	return s
+}
+
+func (s *ExistsService) Id(id string) *ExistsService {
+	s.id = id
+	return s
+}
+
+func (s *ExistsService) Do() (bool, error) {
+	// Build url
+	path, err := uritemplates.Expand("/{index}/{type}/{id}", map[string]string{
+		"index": s.index,
+		"type":  s._type,
+		"id":    s.id,
+	})
+	if err != nil {
+		return false, err
+	}
+
+	// Get response
+	res, err := s.client.PerformRequest("HEAD", path, nil, nil)
+	if err != nil {
+		return false, err
+	}
+	if res.StatusCode == 200 {
+		return true, nil
+	} else if res.StatusCode == 404 {
+		return false, nil
+	}
+	return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode)
+}
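
A minimal sketch of ExistsService, assuming a connected client; index, type, and id are illustrative. The service issues a HEAD request and maps 200/404 to true/false:

	exists, err := elastic.NewExistsService(client).
		Index("twitter").
		Type("tweet").
		Id("1").
		Do()
	if err != nil {
		// Handle error
	}
	if exists {
		// Document with id "1" exists
	}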

+ 329 - 0
sword_base/olivere/elastic.v1/explain.go

@@ -0,0 +1,329 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"log"
+	"net/url"
+	"strings"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
+var (
+	_ = fmt.Print
+	_ = log.Print
+	_ = strings.Index
+	_ = uritemplates.Expand
+	_ = url.Parse
+)
+
+// ExplainService computes a score explanation for a query and
+// a specific document.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-explain.html.
+type ExplainService struct {
+	client                 *Client
+	pretty                 bool
+	id                     string
+	index                  string
+	typ                    string
+	q                      string
+	routing                string
+	lenient                *bool
+	analyzer               string
+	df                     string
+	fields                 []string
+	lowercaseExpandedTerms *bool
+	xSourceInclude         []string
+	analyzeWildcard        *bool
+	parent                 string
+	preference             string
+	xSource                []string
+	defaultOperator        string
+	xSourceExclude         []string
+	source                 string
+	bodyJson               interface{}
+	bodyString             string
+}
+
+// NewExplainService creates a new ExplainService.
+func NewExplainService(client *Client) *ExplainService {
+	return &ExplainService{
+		client:         client,
+		xSource:        make([]string, 0),
+		xSourceExclude: make([]string, 0),
+		fields:         make([]string, 0),
+		xSourceInclude: make([]string, 0),
+	}
+}
+
+// Id is the document ID.
+func (s *ExplainService) Id(id string) *ExplainService {
+	s.id = id
+	return s
+}
+
+// Index is the name of the index.
+func (s *ExplainService) Index(index string) *ExplainService {
+	s.index = index
+	return s
+}
+
+// Type is the type of the document.
+func (s *ExplainService) Type(typ string) *ExplainService {
+	s.typ = typ
+	return s
+}
+
+// Source is the URL-encoded query definition (instead of using the request body).
+func (s *ExplainService) Source(source string) *ExplainService {
+	s.source = source
+	return s
+}
+
+// XSourceExclude is a list of fields to exclude from the returned _source field.
+func (s *ExplainService) XSourceExclude(xSourceExclude ...string) *ExplainService {
+	s.xSourceExclude = make([]string, 0)
+	s.xSourceExclude = append(s.xSourceExclude, xSourceExclude...)
+	return s
+}
+
+// Lenient specifies whether format-based query failures
+// (such as providing text to a numeric field) should be ignored.
+func (s *ExplainService) Lenient(lenient bool) *ExplainService {
+	s.lenient = &lenient
+	return s
+}
+
+// Query in the Lucene query string syntax.
+func (s *ExplainService) Q(q string) *ExplainService {
+	s.q = q
+	return s
+}
+
+// Routing sets a specific routing value.
+func (s *ExplainService) Routing(routing string) *ExplainService {
+	s.routing = routing
+	return s
+}
+
+// AnalyzeWildcard specifies whether wildcards and prefix queries
+// in the query string query should be analyzed (default: false).
+func (s *ExplainService) AnalyzeWildcard(analyzeWildcard bool) *ExplainService {
+	s.analyzeWildcard = &analyzeWildcard
+	return s
+}
+
+// Analyzer is the analyzer for the query string query.
+func (s *ExplainService) Analyzer(analyzer string) *ExplainService {
+	s.analyzer = analyzer
+	return s
+}
+
+// Df is the default field for query string query (default: _all).
+func (s *ExplainService) Df(df string) *ExplainService {
+	s.df = df
+	return s
+}
+
+// Fields is a list of fields to return in the response.
+func (s *ExplainService) Fields(fields ...string) *ExplainService {
+	s.fields = make([]string, 0)
+	s.fields = append(s.fields, fields...)
+	return s
+}
+
+// LowercaseExpandedTerms specifies whether query terms should be lowercased.
+func (s *ExplainService) LowercaseExpandedTerms(lowercaseExpandedTerms bool) *ExplainService {
+	s.lowercaseExpandedTerms = &lowercaseExpandedTerms
+	return s
+}
+
+// XSourceInclude is a list of fields to extract and return from the _source field.
+func (s *ExplainService) XSourceInclude(xSourceInclude ...string) *ExplainService {
+	s.xSourceInclude = make([]string, 0)
+	s.xSourceInclude = append(s.xSourceInclude, xSourceInclude...)
+	return s
+}
+
+// DefaultOperator is the default operator for query string query (AND or OR).
+func (s *ExplainService) DefaultOperator(defaultOperator string) *ExplainService {
+	s.defaultOperator = defaultOperator
+	return s
+}
+
+// Parent is the ID of the parent document.
+func (s *ExplainService) Parent(parent string) *ExplainService {
+	s.parent = parent
+	return s
+}
+
+// Preference specifies the node or shard the operation should be performed on (default: random).
+func (s *ExplainService) Preference(preference string) *ExplainService {
+	s.preference = preference
+	return s
+}
+
+// XSource is true or false to return the _source field or not, or a list of fields to return.
+func (s *ExplainService) XSource(xSource ...string) *ExplainService {
+	s.xSource = make([]string, 0)
+	s.xSource = append(s.xSource, xSource...)
+	return s
+}
+
+// Pretty indicates that the JSON response should be indented and human readable.
+func (s *ExplainService) Pretty(pretty bool) *ExplainService {
+	s.pretty = pretty
+	return s
+}
+
+// Query sets a query definition using the Query DSL.
+func (s *ExplainService) Query(query Query) *ExplainService {
+	body := make(map[string]interface{})
+	body["query"] = query.Source()
+	s.bodyJson = body
+	return s
+}
+
+// BodyJson sets the query definition using the Query DSL.
+func (s *ExplainService) BodyJson(body interface{}) *ExplainService {
+	s.bodyJson = body
+	return s
+}
+
+// BodyString sets the query definition using the Query DSL as a string.
+func (s *ExplainService) BodyString(body string) *ExplainService {
+	s.bodyString = body
+	return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *ExplainService) buildURL() (string, url.Values, error) {
+	// Build URL
+	path, err := uritemplates.Expand("/{index}/{type}/{id}/_explain", map[string]string{
+		"id":    s.id,
+		"index": s.index,
+		"type":  s.typ,
+	})
+	if err != nil {
+		return "", url.Values{}, err
+	}
+
+	// Add query string parameters
+	params := url.Values{}
+	if s.pretty {
+		params.Set("pretty", "1")
+	}
+	if len(s.xSource) > 0 {
+		params.Set("_source", strings.Join(s.xSource, ","))
+	}
+	if s.defaultOperator != "" {
+		params.Set("default_operator", s.defaultOperator)
+	}
+	if s.parent != "" {
+		params.Set("parent", s.parent)
+	}
+	if s.preference != "" {
+		params.Set("preference", s.preference)
+	}
+	if s.source != "" {
+		params.Set("source", s.source)
+	}
+	if len(s.xSourceExclude) > 0 {
+		params.Set("_source_exclude", strings.Join(s.xSourceExclude, ","))
+	}
+	if s.lenient != nil {
+		params.Set("lenient", fmt.Sprintf("%v", *s.lenient))
+	}
+	if s.q != "" {
+		params.Set("q", s.q)
+	}
+	if s.routing != "" {
+		params.Set("routing", s.routing)
+	}
+	if len(s.fields) > 0 {
+		params.Set("fields", strings.Join(s.fields, ","))
+	}
+	if s.lowercaseExpandedTerms != nil {
+		params.Set("lowercase_expanded_terms", fmt.Sprintf("%v", *s.lowercaseExpandedTerms))
+	}
+	if len(s.xSourceInclude) > 0 {
+		params.Set("_source_include", strings.Join(s.xSourceInclude, ","))
+	}
+	if s.analyzeWildcard != nil {
+		params.Set("analyze_wildcard", fmt.Sprintf("%v", *s.analyzeWildcard))
+	}
+	if s.analyzer != "" {
+		params.Set("analyzer", s.analyzer)
+	}
+	if s.df != "" {
+		params.Set("df", s.df)
+	}
+	return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *ExplainService) Validate() error {
+	var invalid []string
+	if s.index == "" {
+		invalid = append(invalid, "Index")
+	}
+	if s.typ == "" {
+		invalid = append(invalid, "Type")
+	}
+	if s.id == "" {
+		invalid = append(invalid, "Id")
+	}
+	if len(invalid) > 0 {
+		return fmt.Errorf("missing required fields: %v", invalid)
+	}
+	return nil
+}
+
+// Do executes the operation.
+func (s *ExplainService) Do() (*ExplainResponse, error) {
+	// Check pre-conditions
+	if err := s.Validate(); err != nil {
+		return nil, err
+	}
+
+	// Get URL for request
+	path, params, err := s.buildURL()
+	if err != nil {
+		return nil, err
+	}
+
+	// Setup HTTP request body
+	var body interface{}
+	if s.bodyJson != nil {
+		body = s.bodyJson
+	} else {
+		body = s.bodyString
+	}
+
+	// Get HTTP response
+	res, err := s.client.PerformRequest("GET", path, params, body)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return operation response
+	ret := new(ExplainResponse)
+	if err := json.Unmarshal(res.Body, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// ExplainResponse is the response of ExplainService.Do.
+type ExplainResponse struct {
+	Index       string                 `json:"_index"`
+	Type        string                 `json:"_type"`
+	Id          string                 `json:"_id"`
+	Matched     bool                   `json:"matched"`
+	Explanation map[string]interface{} `json:"explanation"`
+}
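
A hedged sketch of ExplainService using the Lucene query-string form (Q) rather than a request body, assuming a connected client; index, type, id, and the query string are illustrative:

	expl, err := client.Explain("twitter", "tweet", "1").
		Q("user:olivere").
		Do()
	if err != nil {
		// Handle error
	}
	if expl != nil && expl.Matched {
		// expl.Explanation holds the score breakdown for document "1"
	}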

+ 41 - 0
sword_base/olivere/elastic.v1/explain_test.go

@@ -0,0 +1,41 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import "testing"
+
+func TestExplain(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+
+	// Add a document
+	indexResult, err := client.Index().
+		Index(testIndexName).
+		Type("tweet").
+		Id("1").
+		BodyJson(&tweet1).
+		Refresh(true).
+		Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if indexResult == nil {
+		t.Errorf("expected result to be != nil; got: %v", indexResult)
+	}
+
+	// Explain
+	query := NewTermQuery("user", "olivere")
+	expl, err := client.Explain(testIndexName, "tweet", "1").Query(query).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if expl == nil {
+		t.Fatal("expected to return an explanation")
+	}
+	if !expl.Matched {
+		t.Errorf("expected matched to be %v; got: %v", true, expl.Matched)
+	}
+}

+ 74 - 0
sword_base/olivere/elastic.v1/fetch_source_context.go

@@ -0,0 +1,74 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"net/url"
+	"strings"
+)
+
+type FetchSourceContext struct {
+	fetchSource     bool
+	transformSource bool
+	includes        []string
+	excludes        []string
+}
+
+func NewFetchSourceContext(fetchSource bool) *FetchSourceContext {
+	return &FetchSourceContext{
+		fetchSource: fetchSource,
+		includes:    make([]string, 0),
+		excludes:    make([]string, 0),
+	}
+}
+
+func (fsc *FetchSourceContext) FetchSource() bool {
+	return fsc.fetchSource
+}
+
+func (fsc *FetchSourceContext) SetFetchSource(fetchSource bool) {
+	fsc.fetchSource = fetchSource
+}
+
+func (fsc *FetchSourceContext) Include(includes ...string) *FetchSourceContext {
+	fsc.includes = append(fsc.includes, includes...)
+	return fsc
+}
+
+func (fsc *FetchSourceContext) Exclude(excludes ...string) *FetchSourceContext {
+	fsc.excludes = append(fsc.excludes, excludes...)
+	return fsc
+}
+
+func (fsc *FetchSourceContext) TransformSource(transformSource bool) *FetchSourceContext {
+	fsc.transformSource = transformSource
+	return fsc
+}
+
+func (fsc *FetchSourceContext) Source() interface{} {
+	if !fsc.fetchSource {
+		return false
+	}
+	return map[string]interface{}{
+		"includes": fsc.includes,
+		"excludes": fsc.excludes,
+	}
+}
+
+// Query returns the parameters in a form suitable for a URL query string.
+func (fsc *FetchSourceContext) Query() url.Values {
+	params := url.Values{}
+	if !fsc.fetchSource {
+		params.Add("_source", "false")
+		return params
+	}
+	if len(fsc.includes) > 0 {
+		params.Add("_source_include", strings.Join(fsc.includes, ","))
+	}
+	if len(fsc.excludes) > 0 {
+		params.Add("_source_exclude", strings.Join(fsc.excludes, ","))
+	}
+	return params
+}
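
A minimal sketch combining FetchSourceContext with the Get service to trim the returned _source, assuming a connected client; the field names are illustrative:

	fsc := elastic.NewFetchSourceContext(true).
		Include("user", "message").
		Exclude("retweets")
	get, err := client.Get().
		Index("twitter").
		Type("tweet").
		Id("1").
		FetchSourceContext(fsc).
		Do()
	if err != nil {
		// Handle error
	}
	if get.Found && get.Source != nil {
		// get.Source now contains only the included fields
	}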

+ 92 - 0
sword_base/olivere/elastic.v1/fetch_source_context_test.go

@@ -0,0 +1,92 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"testing"
+)
+
+func TestFetchSourceContextNoFetchSource(t *testing.T) {
+	builder := NewFetchSourceContext(false)
+	data, err := json.Marshal(builder.Source())
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `false`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestFetchSourceContextNoFetchSourceIgnoreIncludesAndExcludes(t *testing.T) {
+	builder := NewFetchSourceContext(false).Include("a", "b").Exclude("c")
+	data, err := json.Marshal(builder.Source())
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `false`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestFetchSourceContextFetchSource(t *testing.T) {
+	builder := NewFetchSourceContext(true)
+	data, err := json.Marshal(builder.Source())
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"excludes":[],"includes":[]}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestFetchSourceContextFetchSourceWithIncludesAndExcludes(t *testing.T) {
+	builder := NewFetchSourceContext(true).Include("a", "b").Exclude("c")
+	data, err := json.Marshal(builder.Source())
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"excludes":["c"],"includes":["a","b"]}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestFetchSourceContextQueryDefaults(t *testing.T) {
+	builder := NewFetchSourceContext(true)
+	values := builder.Query()
+	got := values.Encode()
+	expected := ""
+	if got != expected {
+		t.Errorf("expected %q; got: %q", expected, got)
+	}
+}
+
+func TestFetchSourceContextQueryNoFetchSource(t *testing.T) {
+	builder := NewFetchSourceContext(false)
+	values := builder.Query()
+	got := values.Encode()
+	expected := "_source=false"
+	if got != expected {
+		t.Errorf("expected %q; got: %q", expected, got)
+	}
+}
+
+func TestFetchSourceContextQueryFetchSourceWithIncludesAndExcludes(t *testing.T) {
+	builder := NewFetchSourceContext(true).Include("a", "b").Exclude("c")
+	values := builder.Query()
+	got := values.Encode()
+	expected := "_source_exclude=c&_source_include=a%2Cb"
+	if got != expected {
+		t.Errorf("expected %q; got: %q", expected, got)
+	}
+}

+ 9 - 0
sword_base/olivere/elastic.v1/filter.go

@@ -0,0 +1,9 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+type Filter interface {
+	Source() interface{}
+}

+ 167 - 0
sword_base/olivere/elastic.v1/flush.go

@@ -0,0 +1,167 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+	"strings"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
+// FlushService allows you to flush one or more indices. The flush process of an index
+// basically frees memory from the index by flushing data to the index
+// storage and clearing the internal transaction log.
+//
+// See http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-flush.html
+// for details.
+type FlushService struct {
+	client *Client
+
+	indices           []string
+	force             *bool
+	full              *bool
+	waitIfOngoing     *bool
+	ignoreUnavailable *bool
+	allowNoIndices    *bool
+	expandWildcards   string
+}
+
+func NewFlushService(client *Client) *FlushService {
+	builder := &FlushService{
+		client: client,
+	}
+	return builder
+}
+
+func (s *FlushService) Index(index string) *FlushService {
+	if s.indices == nil {
+		s.indices = make([]string, 0)
+	}
+	s.indices = append(s.indices, index)
+	return s
+}
+
+func (s *FlushService) Indices(indices ...string) *FlushService {
+	if s.indices == nil {
+		s.indices = make([]string, 0)
+	}
+	s.indices = append(s.indices, indices...)
+	return s
+}
+
+// Force specifies whether to force a flush even if it is not necessary.
+func (s *FlushService) Force(force bool) *FlushService {
+	s.force = &force
+	return s
+}
+
+// Full, when set to true, creates a new index writer for the index and
+// refreshes all settings related to the index.
+func (s *FlushService) Full(full bool) *FlushService {
+	s.full = &full
+	return s
+}
+
+// WaitIfOngoing will block until the flush can be executed (if set to true)
+// if another flush operation is already executing. The default is false
+// and will cause an exception to be thrown on the shard level if another
+// flush operation is already running. [1.4.0.Beta1]
+func (s *FlushService) WaitIfOngoing(wait bool) *FlushService {
+	s.waitIfOngoing = &wait
+	return s
+}
+
+// IgnoreUnavailable specifies whether concrete indices should be ignored
+// when unavailable (e.g. missing or closed).
+func (s *FlushService) IgnoreUnavailable(ignoreUnavailable bool) *FlushService {
+	s.ignoreUnavailable = &ignoreUnavailable
+	return s
+}
+
+// AllowNoIndices specifies whether to ignore if a wildcard expression
+// yields no indices. This includes the _all index or when no indices
+// have been specified.
+func (s *FlushService) AllowNoIndices(allowNoIndices bool) *FlushService {
+	s.allowNoIndices = &allowNoIndices
+	return s
+}
+
+// ExpandWildcards specifies whether to expand wildcards to concrete indices
+// that are open, closed, or both. Use one of "open", "closed", "none", or "all".
+func (s *FlushService) ExpandWildcards(expandWildcards string) *FlushService {
+	s.expandWildcards = expandWildcards
+	return s
+}
+
+// Do executes the service.
+func (s *FlushService) Do() (*FlushResult, error) {
+	// Build url
+	path := "/"
+
+	// Indices part
+	if len(s.indices) > 0 {
+		indexPart := make([]string, 0)
+		for _, index := range s.indices {
+			index, err := uritemplates.Expand("{index}", map[string]string{
+				"index": index,
+			})
+			if err != nil {
+				return nil, err
+			}
+			indexPart = append(indexPart, index)
+		}
+		path += strings.Join(indexPart, ",") + "/"
+	}
+	path += "_flush"
+
+	// Parameters
+	params := make(url.Values)
+	if s.force != nil {
+		params.Set("force", fmt.Sprintf("%v", *s.force))
+	}
+	if s.full != nil {
+		params.Set("full", fmt.Sprintf("%v", *s.full))
+	}
+	if s.waitIfOngoing != nil {
+		params.Set("wait_if_ongoing", fmt.Sprintf("%v", *s.waitIfOngoing))
+	}
+	if s.ignoreUnavailable != nil {
+		params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+	}
+	if s.allowNoIndices != nil {
+		params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+	}
+	if s.expandWildcards != "" {
+		params.Set("expand_wildcards", s.expandWildcards)
+	}
+
+	// Get response
+	res, err := s.client.PerformRequest("POST", path, params, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return result
+	ret := new(FlushResult)
+	if err := json.Unmarshal(res.Body, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// -- Result of a flush request.
+
+type shardsInfo struct {
+	Total      int `json:"total"`
+	Successful int `json:"successful"`
+	Failed     int `json:"failed"`
+}
+
+type FlushResult struct {
+	Shards shardsInfo `json:"_shards"`
+}
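
A minimal sketch of FlushService that waits for an already-running flush instead of failing, assuming a connected client; the index name is illustrative:

	res, err := client.Flush().
		Index("twitter").
		WaitIfOngoing(true).
		Do()
	if err != nil {
		// Handle error
	}
	if res != nil {
		// res.Shards reports how many shards were flushed successfully
	}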

+ 22 - 0
sword_base/olivere/elastic.v1/flush_test.go

@@ -0,0 +1,22 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"testing"
+)
+
+func TestFlush(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	// Flush all indices
+	res, err := client.Flush().Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if res == nil {
+		t.Errorf("expected res to be != nil; got: %v", res)
+	}
+}

+ 47 - 0
sword_base/olivere/elastic.v1/geo_point.go

@@ -0,0 +1,47 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+// GeoPoint is a geographic position described via latitude and longitude.
+type GeoPoint struct {
+	Lat, Lon float64
+}
+
+// Source returns the object to be serialized in Elasticsearch DSL.
+func (pt *GeoPoint) Source() map[string]float64 {
+	return map[string]float64{
+		"lat": pt.Lat,
+		"lon": pt.Lon,
+	}
+}
+
+// GeoPointFromLatLon initializes a new GeoPoint by latitude and longitude.
+func GeoPointFromLatLon(lat, lon float64) *GeoPoint {
+	return &GeoPoint{Lat: lat, Lon: lon}
+}
+
+// GeoPointFromString initializes a new GeoPoint by a string that is
+// formatted as "{latitude},{longitude}", e.g. "40.10210,-70.12091".
+func GeoPointFromString(latLon string) (*GeoPoint, error) {
+	latlon := strings.SplitN(latLon, ",", 2)
+	if len(latlon) != 2 {
+		return nil, fmt.Errorf("elastic: %s is not a valid geo point string", latLon)
+	}
+	lat, err := strconv.ParseFloat(latlon[0], 64)
+	if err != nil {
+		return nil, err
+	}
+	lon, err := strconv.ParseFloat(latlon[1], 64)
+	if err != nil {
+		return nil, err
+	}
+	return &GeoPoint{Lat: lat, Lon: lon}, nil
+}
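
A minimal sketch of parsing a GeoPoint from a string and serializing it for indexing; the coordinates and document layout are illustrative:

	pt, err := elastic.GeoPointFromString("40.10210,-70.12091")
	if err != nil {
		// Handle error
		panic(err)
	}
	doc := map[string]interface{}{
		"name":     "Some place",
		"location": pt.Source(), // serializes as {"lat":40.1021,"lon":-70.12091}
	}
	_ = doc // e.g. index it with client.Index().BodyJson(doc)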

+ 24 - 0
sword_base/olivere/elastic.v1/geo_point_test.go

@@ -0,0 +1,24 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"testing"
+)
+
+func TestGeoPointSource(t *testing.T) {
+	pt := GeoPoint{Lat: 40, Lon: -70}
+
+	data, err := json.Marshal(pt.Source())
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"lat":40,"lon":-70}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}

+ 223 - 0
sword_base/olivere/elastic.v1/get.go

@@ -0,0 +1,223 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+	"strings"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
+type GetService struct {
+	client                        *Client
+	index                         string
+	typ                           string
+	id                            string
+	routing                       string
+	preference                    string
+	fields                        []string
+	refresh                       *bool
+	realtime                      *bool
+	fsc                           *FetchSourceContext
+	versionType                   string
+	version                       *int64
+	ignoreErrorsOnGeneratedFields *bool
+}
+
+func NewGetService(client *Client) *GetService {
+	builder := &GetService{
+		client: client,
+		typ:    "_all",
+	}
+	return builder
+}
+
+func (b *GetService) String() string {
+	return fmt.Sprintf("[%v][%v][%v]: routing [%v]",
+		b.index,
+		b.typ,
+		b.id,
+		b.routing)
+}
+
+func (b *GetService) Index(index string) *GetService {
+	b.index = index
+	return b
+}
+
+func (b *GetService) Type(typ string) *GetService {
+	b.typ = typ
+	return b
+}
+
+func (b *GetService) Id(id string) *GetService {
+	b.id = id
+	return b
+}
+
+func (b *GetService) Parent(parent string) *GetService {
+	if b.routing == "" {
+		b.routing = parent
+	}
+	return b
+}
+
+func (b *GetService) Routing(routing string) *GetService {
+	b.routing = routing
+	return b
+}
+
+func (b *GetService) Preference(preference string) *GetService {
+	b.preference = preference
+	return b
+}
+
+func (b *GetService) Fields(fields ...string) *GetService {
+	if b.fields == nil {
+		b.fields = make([]string, 0)
+	}
+	b.fields = append(b.fields, fields...)
+	return b
+}
+
+func (s *GetService) FetchSource(fetchSource bool) *GetService {
+	if s.fsc == nil {
+		s.fsc = NewFetchSourceContext(fetchSource)
+	} else {
+		s.fsc.SetFetchSource(fetchSource)
+	}
+	return s
+}
+
+func (s *GetService) FetchSourceContext(fetchSourceContext *FetchSourceContext) *GetService {
+	s.fsc = fetchSourceContext
+	return s
+}
+
+func (b *GetService) Refresh(refresh bool) *GetService {
+	b.refresh = &refresh
+	return b
+}
+
+func (b *GetService) Realtime(realtime bool) *GetService {
+	b.realtime = &realtime
+	return b
+}
+
+func (b *GetService) VersionType(versionType string) *GetService {
+	b.versionType = versionType
+	return b
+}
+
+func (b *GetService) Version(version int64) *GetService {
+	b.version = &version
+	return b
+}
+
+func (b *GetService) IgnoreErrorsOnGeneratedFields(ignore bool) *GetService {
+	b.ignoreErrorsOnGeneratedFields = &ignore
+	return b
+}
+
+// Validate checks if the operation is valid.
+func (s *GetService) Validate() error {
+	var invalid []string
+	if s.id == "" {
+		invalid = append(invalid, "Id")
+	}
+	if s.index == "" {
+		invalid = append(invalid, "Index")
+	}
+	if s.typ == "" {
+		invalid = append(invalid, "Type")
+	}
+	if len(invalid) > 0 {
+		return fmt.Errorf("missing required fields: %v", invalid)
+	}
+	return nil
+}
+
+func (b *GetService) Do() (*GetResult, error) {
+	// Check pre-conditions
+	if err := b.Validate(); err != nil {
+		return nil, err
+	}
+
+	// Build url
+	path, err := uritemplates.Expand("/{index}/{type}/{id}", map[string]string{
+		"index": b.index,
+		"type":  b.typ,
+		"id":    b.id,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	params := make(url.Values)
+	if b.realtime != nil {
+		params.Add("realtime", fmt.Sprintf("%v", *b.realtime))
+	}
+	if len(b.fields) > 0 {
+		params.Add("fields", strings.Join(b.fields, ","))
+	}
+	if b.routing != "" {
+		params.Add("routing", b.routing)
+	}
+	if b.preference != "" {
+		params.Add("preference", b.preference)
+	}
+	if b.refresh != nil {
+		params.Add("refresh", fmt.Sprintf("%v", *b.refresh))
+	}
+	if b.ignoreErrorsOnGeneratedFields != nil {
+		params.Add("ignore_errors_on_generated_fields", fmt.Sprintf("%v", *b.ignoreErrorsOnGeneratedFields))
+	}
+	if b.version != nil {
+		params.Add("version", fmt.Sprintf("%d", *b.version))
+	}
+	if b.versionType != "" {
+		params.Add("version_type", b.versionType)
+	}
+	if b.fsc != nil {
+		for k, values := range b.fsc.Query() {
+			params.Add(k, strings.Join(values, ","))
+		}
+	}
+
+	// Get response
+	res, err := b.client.PerformRequest("GET", path, params, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return result
+	ret := new(GetResult)
+	if err := json.Unmarshal(res.Body, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// -- Result of a get request.
+
+type GetResult struct {
+	Index   string           `json:"_index"`
+	Type    string           `json:"_type"`
+	Id      string           `json:"_id"`
+	Version int64            `json:"_version,omitempty"`
+	Source  *json.RawMessage `json:"_source,omitempty"`
+	Found   bool             `json:"found,omitempty"`
+	Fields  []string         `json:"fields,omitempty"`
+	Error   string           `json:"error,omitempty"` // used only in MultiGet
+}

+ 172 - 0
sword_base/olivere/elastic.v1/get_mapping.go

@@ -0,0 +1,172 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"log"
+	"net/url"
+	"strings"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
+var (
+	_ = fmt.Print
+	_ = log.Print
+	_ = strings.Index
+	_ = uritemplates.Expand
+	_ = url.Parse
+)
+
+// GetMappingService retrieves the mapping definitions for an index or
+// index/type. See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-get-mapping.html.
+type GetMappingService struct {
+	client            *Client
+	pretty            bool
+	index             []string
+	typ               []string
+	local             *bool
+	ignoreUnavailable *bool
+	allowNoIndices    *bool
+	expandWildcards   string
+}
+
+// NewGetMappingService creates a new GetMappingService.
+func NewGetMappingService(client *Client) *GetMappingService {
+	return &GetMappingService{
+		client: client,
+		index:  make([]string, 0),
+		typ:    make([]string, 0),
+	}
+}
+
+// Index is a list of index names.
+func (s *GetMappingService) Index(index ...string) *GetMappingService {
+	s.index = append(s.index, index...)
+	return s
+}
+
+// Type is a list of document types.
+func (s *GetMappingService) Type(typ ...string) *GetMappingService {
+	s.typ = append(s.typ, typ...)
+	return s
+}
+
+// AllowNoIndices indicates whether to ignore if a wildcard indices
+// expression resolves into no concrete indices.
+// This includes `_all` string or when no indices have been specified.
+func (s *GetMappingService) AllowNoIndices(allowNoIndices bool) *GetMappingService {
+	s.allowNoIndices = &allowNoIndices
+	return s
+}
+
+// ExpandWildcards indicates whether to expand wildcard expressions to
+// concrete indices that are open, closed, or both.
+func (s *GetMappingService) ExpandWildcards(expandWildcards string) *GetMappingService {
+	s.expandWildcards = expandWildcards
+	return s
+}
+
+// Local indicates whether to return local information rather than
+// retrieving the state from the master node (default: false).
+func (s *GetMappingService) Local(local bool) *GetMappingService {
+	s.local = &local
+	return s
+}
+
+// IgnoreUnavailable indicates whether specified concrete indices should be
+// ignored when unavailable (missing or closed).
+func (s *GetMappingService) IgnoreUnavailable(ignoreUnavailable bool) *GetMappingService {
+	s.ignoreUnavailable = &ignoreUnavailable
+	return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *GetMappingService) Pretty(pretty bool) *GetMappingService {
+	s.pretty = pretty
+	return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *GetMappingService) buildURL() (string, url.Values, error) {
+	var index, typ []string
+
+	if len(s.index) > 0 {
+		index = s.index
+	} else {
+		index = []string{"_all"}
+	}
+
+	if len(s.typ) > 0 {
+		typ = s.typ
+	} else {
+		typ = []string{"_all"}
+	}
+
+	// Build URL
+	path, err := uritemplates.Expand("/{index}/_mapping/{type}", map[string]string{
+		"index": strings.Join(index, ","),
+		"type":  strings.Join(typ, ","),
+	})
+	if err != nil {
+		return "", url.Values{}, err
+	}
+
+	// Add query string parameters
+	params := url.Values{}
+	if s.pretty {
+		params.Set("pretty", "1")
+	}
+	if s.ignoreUnavailable != nil {
+		params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+	}
+	if s.allowNoIndices != nil {
+		params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+	}
+	if s.expandWildcards != "" {
+		params.Set("expand_wildcards", s.expandWildcards)
+	}
+	if s.local != nil {
+		params.Set("local", fmt.Sprintf("%v", *s.local))
+	}
+	return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *GetMappingService) Validate() error {
+	return nil
+}
+
+// Do executes the operation. When successful, it returns the mapping
+// definitions as a map[string]interface{}. If you specify an index that
+// does not exist, Elasticsearch returns HTTP status 404. If you specify
+// a type that does not exist, Elasticsearch returns an empty map.
+func (s *GetMappingService) Do() (map[string]interface{}, error) {
+	// Check pre-conditions
+	if err := s.Validate(); err != nil {
+		return nil, err
+	}
+
+	// Get URL for request
+	path, params, err := s.buildURL()
+	if err != nil {
+		return nil, err
+	}
+
+	// Get HTTP response
+	res, err := s.client.PerformRequest("GET", path, params, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return operation response
+	var ret map[string]interface{}
+	if err := json.Unmarshal(res.Body, &ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
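
A hedged usage sketch for GetMappingService (not part of this commit; the index and type names are placeholders and a connected *Client is assumed). The result is keyed by index name, mirroring the raw Elasticsearch response:

	func exampleGetMapping(client *Client) (map[string]interface{}, error) {
		// Retrieve the mapping of the "tweet" type in the "twitter" index.
		return client.GetMapping().
			Index("twitter").
			Type("tweet").
			Pretty(true).
			Do()
	}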

+ 50 - 0
sword_base/olivere/elastic.v1/get_mapping_test.go

@@ -0,0 +1,50 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"testing"
+)
+
+func TestGetMappingURL(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	tests := []struct {
+		Indices  []string
+		Types    []string
+		Expected string
+	}{
+		{
+			[]string{},
+			[]string{},
+			"/_all/_mapping/_all",
+		},
+		{
+			[]string{},
+			[]string{"tweet"},
+			"/_all/_mapping/tweet",
+		},
+		{
+			[]string{"twitter"},
+			[]string{"tweet"},
+			"/twitter/_mapping/tweet",
+		},
+		{
+			[]string{"store-1", "store-2"},
+			[]string{"tweet", "user"},
+			"/store-1%2Cstore-2/_mapping/tweet%2Cuser",
+		},
+	}
+
+	for _, test := range tests {
+		path, _, err := client.GetMapping().Index(test.Indices...).Type(test.Types...).buildURL()
+		if err != nil {
+			t.Fatal(err)
+		}
+		if path != test.Expected {
+			t.Errorf("expected %q; got: %q", test.Expected, path)
+		}
+	}
+}

+ 113 - 0
sword_base/olivere/elastic.v1/get_template.go

@@ -0,0 +1,113 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
+// GetTemplateService reads a search template.
+// It is documented at http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/search-template.html.
+type GetTemplateService struct {
+	client      *Client
+	pretty      bool
+	id          string
+	version     interface{}
+	versionType string
+}
+
+// NewGetTemplateService creates a new GetTemplateService.
+func NewGetTemplateService(client *Client) *GetTemplateService {
+	return &GetTemplateService{
+		client: client,
+	}
+}
+
+// Id is the template ID.
+func (s *GetTemplateService) Id(id string) *GetTemplateService {
+	s.id = id
+	return s
+}
+
+// Version is an explicit version number for concurrency control.
+func (s *GetTemplateService) Version(version interface{}) *GetTemplateService {
+	s.version = version
+	return s
+}
+
+// VersionType is a specific version type.
+func (s *GetTemplateService) VersionType(versionType string) *GetTemplateService {
+	s.versionType = versionType
+	return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *GetTemplateService) buildURL() (string, url.Values, error) {
+	// Build URL
+	path, err := uritemplates.Expand("/_search/template/{id}", map[string]string{
+		"id": s.id,
+	})
+	if err != nil {
+		return "", url.Values{}, err
+	}
+
+	// Add query string parameters
+	params := url.Values{}
+	if s.version != nil {
+		params.Set("version", fmt.Sprintf("%v", s.version))
+	}
+	if s.versionType != "" {
+		params.Set("version_type", s.versionType)
+	}
+
+	return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *GetTemplateService) Validate() error {
+	var invalid []string
+	if s.id == "" {
+		invalid = append(invalid, "Id")
+	}
+	if len(invalid) > 0 {
+		return fmt.Errorf("missing required fields: %v", invalid)
+	}
+	return nil
+}
+
+// Do executes the operation and returns the template.
+func (s *GetTemplateService) Do() (*GetTemplateResponse, error) {
+	// Check pre-conditions
+	if err := s.Validate(); err != nil {
+		return nil, err
+	}
+
+	// Get URL for request
+	path, params, err := s.buildURL()
+	if err != nil {
+		return nil, err
+	}
+
+	// Get HTTP response
+	res, err := s.client.PerformRequest("GET", path, params, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return result
+	ret := new(GetTemplateResponse)
+	if err := json.Unmarshal(res.Body, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+type GetTemplateResponse struct {
+	Template string `json:"template"`
+}
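
Illustrative only: reading back a stored search template by ID, assuming the template was registered earlier (as the test below does with PutTemplate) and that client is a connected *Client:

	func exampleGetTemplate(client *Client) (string, error) {
		res, err := client.GetTemplate().Id("elastic-template").Do()
		if err != nil {
			return "", err
		}
		// GetTemplateResponse only carries the raw template body.
		return res.Template, nil
	}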

+ 51 - 0
sword_base/olivere/elastic.v1/get_template_test.go

@@ -0,0 +1,51 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"testing"
+)
+
+func TestGetPutDeleteTemplate(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	// This is a search template, not an index template!
+	tmpl := `{
+	"template": {
+		"query" : { "term" : { "{{my_field}}" : "{{my_value}}" } },
+		"size"  : "{{my_size}}"
+	},
+	"params":{
+		"my_field" : "user",
+		"my_value" : "olivere",
+		"my_size" : 5
+	}
+}`
+	putres, err := client.PutTemplate().Id("elastic-template").BodyString(tmpl).Do()
+	if err != nil {
+		t.Fatalf("expected no error; got: %v", err)
+	}
+	if putres == nil {
+		t.Fatalf("expected response; got: %v", putres)
+	}
+	if !putres.Created {
+		t.Fatalf("expected template to be created; got: %v", putres.Created)
+	}
+
+	// Always delete template
+	defer client.DeleteTemplate().Id("elastic-template").Do()
+
+	// Get template
+	getres, err := client.GetTemplate().Id("elastic-template").Do()
+	if err != nil {
+		t.Fatalf("expected no error; got: %v", err)
+	}
+	if getres == nil {
+		t.Fatalf("expected response; got: %v", getres)
+	}
+	if getres.Template == "" {
+		t.Errorf("expected template %q; got: %q", tmpl, getres.Template)
+	}
+}

+ 165 - 0
sword_base/olivere/elastic.v1/get_test.go

@@ -0,0 +1,165 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"testing"
+)
+
+func TestGet(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+	tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+	tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
+
+	// Add all documents
+	_, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Flush().Index(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Count documents
+	count, err := client.Count(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if count != 3 {
+		t.Errorf("expected Count = %d; got %d", 3, count)
+	}
+
+	// Get document 1
+	res, err := client.Get().Index(testIndexName).Type("tweet").Id("1").Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if res.Found != true {
+		t.Errorf("expected Found = true; got %v", res.Found)
+	}
+	if res.Source == nil {
+		t.Errorf("expected Source != nil; got %v", res.Source)
+	}
+
+	// Get non-existent document 99
+	res, err = client.Get().Index(testIndexName).Type("tweet").Id("99").Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if res.Found != false {
+		t.Errorf("expected Found = false; got %v", res.Found)
+	}
+	if res.Source != nil {
+		t.Errorf("expected Source == nil; got %v", res.Source)
+	}
+}
+
+func TestGetWithSourceFiltering(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+	tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+	tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
+
+	// Add all documents
+	_, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Flush().Index(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Get document 1, without source
+	res, err := client.Get().Index(testIndexName).Type("tweet").Id("1").FetchSource(false).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if res.Found != true {
+		t.Errorf("expected Found = true; got %v", res.Found)
+	}
+	if res.Source != nil {
+		t.Errorf("expected Source == nil; got %v", res.Source)
+	}
+
+	// Get document 1, exclude Message field
+	fsc := NewFetchSourceContext(true).Exclude("message")
+	res, err = client.Get().Index(testIndexName).Type("tweet").Id("1").FetchSourceContext(fsc).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if res.Found != true {
+		t.Errorf("expected Found = true; got %v", res.Found)
+	}
+	if res.Source == nil {
+		t.Errorf("expected Source != nil; got %v", res.Source)
+	}
+	var tw tweet
+	err = json.Unmarshal(*res.Source, &tw)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if tw.User != "olivere" {
+		t.Errorf("expected user %q; got: %q", "olivere", tw.User)
+	}
+	if tw.Message != "" {
+		t.Errorf("expected message %q; got: %q", "", tw.Message)
+	}
+}
+
+func TestGetFailsWithMissingParams(t *testing.T) {
+	// Mitigate against http://stackoverflow.com/questions/27491738/elasticsearch-go-index-failures-no-feature-for-name
+	client := setupTestClientAndCreateIndex(t)
+	if _, err := client.Get().Do(); err == nil {
+		t.Fatal("expected Get to fail")
+	}
+	if _, err := client.Get().Index(testIndexName).Do(); err == nil {
+		t.Fatal("expected Get to fail")
+	}
+	if _, err := client.Get().Type("tweet").Do(); err == nil {
+		t.Fatal("expected Get to fail")
+	}
+	if _, err := client.Get().Id("1").Do(); err == nil {
+		t.Fatal("expected Get to fail")
+	}
+	if _, err := client.Get().Index(testIndexName).Type("tweet").Do(); err == nil {
+		t.Fatal("expected Get to fail")
+	}
+	/*
+		if _, err := client.Get().Index(testIndexName).Id("1").Do(); err == nil {
+			t.Fatal("expected Get to fail")
+		}
+	*/
+	if _, err := client.Get().Type("tweet").Id("1").Do(); err == nil {
+		t.Fatal("expected Get to fail")
+	}
+}

+ 496 - 0
sword_base/olivere/elastic.v1/highlight.go

@@ -0,0 +1,496 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// Highlight allows highlighting search results on one or more fields.
+// For details, see:
+// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-highlighting.html
+type Highlight struct {
+	fields                []*HighlighterField
+	tagsSchema            *string
+	highlightFilter       *bool
+	fragmentSize          *int
+	numOfFragments        *int
+	preTags               []string
+	postTags              []string
+	order                 *string
+	encoder               *string
+	requireFieldMatch     *bool
+	boundaryMaxScan       *int
+	boundaryChars         []rune
+	highlighterType       *string
+	fragmenter            *string
+	highlightQuery        Query
+	noMatchSize           *int
+	phraseLimit           *int
+	options               map[string]interface{}
+	forceSource           *bool
+	useExplicitFieldOrder bool
+}
+
+func NewHighlight() *Highlight {
+	hl := &Highlight{
+		fields:        make([]*HighlighterField, 0),
+		preTags:       make([]string, 0),
+		postTags:      make([]string, 0),
+		boundaryChars: make([]rune, 0),
+		options:       make(map[string]interface{}),
+	}
+	return hl
+}
+
+func (hl *Highlight) Fields(fields ...*HighlighterField) *Highlight {
+	hl.fields = append(hl.fields, fields...)
+	return hl
+}
+
+func (hl *Highlight) Field(name string) *Highlight {
+	field := NewHighlighterField(name)
+	hl.fields = append(hl.fields, field)
+	return hl
+}
+
+func (hl *Highlight) TagsSchema(schemaName string) *Highlight {
+	hl.tagsSchema = &schemaName
+	return hl
+}
+
+func (hl *Highlight) HighlightFilter(highlightFilter bool) *Highlight {
+	hl.highlightFilter = &highlightFilter
+	return hl
+}
+
+func (hl *Highlight) FragmentSize(fragmentSize int) *Highlight {
+	hl.fragmentSize = &fragmentSize
+	return hl
+}
+
+func (hl *Highlight) NumOfFragments(numOfFragments int) *Highlight {
+	hl.numOfFragments = &numOfFragments
+	return hl
+}
+
+func (hl *Highlight) Encoder(encoder string) *Highlight {
+	hl.encoder = &encoder
+	return hl
+}
+
+func (hl *Highlight) PreTags(preTags ...string) *Highlight {
+	hl.preTags = make([]string, 0)
+	hl.preTags = append(hl.preTags, preTags...)
+	return hl
+}
+
+func (hl *Highlight) PostTags(postTags ...string) *Highlight {
+	hl.postTags = make([]string, 0)
+	hl.postTags = append(hl.postTags, postTags...)
+	return hl
+}
+
+func (hl *Highlight) Order(order string) *Highlight {
+	hl.order = &order
+	return hl
+}
+
+func (hl *Highlight) RequireFieldMatch(requireFieldMatch bool) *Highlight {
+	hl.requireFieldMatch = &requireFieldMatch
+	return hl
+}
+
+func (hl *Highlight) BoundaryMaxScan(boundaryMaxScan int) *Highlight {
+	hl.boundaryMaxScan = &boundaryMaxScan
+	return hl
+}
+
+func (hl *Highlight) BoundaryChars(boundaryChars ...rune) *Highlight {
+	hl.boundaryChars = make([]rune, 0)
+	hl.boundaryChars = append(hl.boundaryChars, boundaryChars...)
+	return hl
+}
+
+func (hl *Highlight) HighlighterType(highlighterType string) *Highlight {
+	hl.highlighterType = &highlighterType
+	return hl
+}
+
+func (hl *Highlight) Fragmenter(fragmenter string) *Highlight {
+	hl.fragmenter = &fragmenter
+	return hl
+}
+
+func (hl *Highlight) HighlighQuery(highlightQuery Query) *Highlight {
+	hl.highlightQuery = highlightQuery
+	return hl
+}
+
+func (hl *Highlight) NoMatchSize(noMatchSize int) *Highlight {
+	hl.noMatchSize = &noMatchSize
+	return hl
+}
+
+func (hl *Highlight) Options(options map[string]interface{}) *Highlight {
+	hl.options = options
+	return hl
+}
+
+func (hl *Highlight) ForceSource(forceSource bool) *Highlight {
+	hl.forceSource = &forceSource
+	return hl
+}
+
+func (hl *Highlight) UseExplicitFieldOrder(useExplicitFieldOrder bool) *Highlight {
+	hl.useExplicitFieldOrder = useExplicitFieldOrder
+	return hl
+}
+
+// Source creates the JSON body of the "highlight" section in a search request.
+func (hl *Highlight) Source() interface{} {
+	// Returns the map inside of "highlight":
+	// "highlight":{
+	//   ... this ...
+	// }
+	source := make(map[string]interface{})
+	if hl.tagsSchema != nil {
+		source["tags_schema"] = *hl.tagsSchema
+	}
+	if hl.preTags != nil && len(hl.preTags) > 0 {
+		source["pre_tags"] = hl.preTags
+	}
+	if hl.postTags != nil && len(hl.postTags) > 0 {
+		source["post_tags"] = hl.postTags
+	}
+	if hl.order != nil {
+		source["order"] = *hl.order
+	}
+	if hl.highlightFilter != nil {
+		source["highlight_filter"] = *hl.highlightFilter
+	}
+	if hl.fragmentSize != nil {
+		source["fragment_size"] = *hl.fragmentSize
+	}
+	if hl.numOfFragments != nil {
+		source["number_of_fragments"] = *hl.numOfFragments
+	}
+	if hl.encoder != nil {
+		source["encoder"] = *hl.encoder
+	}
+	if hl.requireFieldMatch != nil {
+		source["require_field_match"] = *hl.requireFieldMatch
+	}
+	if hl.boundaryMaxScan != nil {
+		source["boundary_max_scan"] = *hl.boundaryMaxScan
+	}
+	if hl.boundaryChars != nil && len(hl.boundaryChars) > 0 {
+		source["boundary_chars"] = hl.boundaryChars
+	}
+	if hl.highlighterType != nil {
+		source["type"] = *hl.highlighterType
+	}
+	if hl.fragmenter != nil {
+		source["fragmenter"] = *hl.fragmenter
+	}
+	if hl.highlightQuery != nil {
+		source["highlight_query"] = hl.highlightQuery.Source()
+	}
+	if hl.noMatchSize != nil {
+		source["no_match_size"] = *hl.noMatchSize
+	}
+	if hl.phraseLimit != nil {
+		source["phrase_limit"] = *hl.phraseLimit
+	}
+	if hl.options != nil && len(hl.options) > 0 {
+		source["options"] = hl.options
+	}
+	if hl.forceSource != nil {
+		source["force_source"] = *hl.forceSource
+	}
+
+	if hl.fields != nil && len(hl.fields) > 0 {
+		if hl.useExplicitFieldOrder {
+			// Use a slice for the fields
+			fields := make([]map[string]interface{}, 0)
+			for _, field := range hl.fields {
+				fmap := make(map[string]interface{})
+				fmap[field.Name] = field.Source()
+				fields = append(fields, fmap)
+			}
+			source["fields"] = fields
+		} else {
+			// Use a map for the fields
+			fields := make(map[string]interface{}, 0)
+			for _, field := range hl.fields {
+				fields[field.Name] = field.Source()
+			}
+			source["fields"] = fields
+		}
+	}
+
+	return source
+
+	/*
+		highlightS := make(map[string]interface{})
+
+		if hl.tagsSchema != "" {
+			highlightS["tags_schema"] = hl.tagsSchema
+		}
+		if len(hl.preTags) > 0 {
+			highlightS["pre_tags"] = hl.preTags
+		}
+		if len(hl.postTags) > 0 {
+			highlightS["post_tags"] = hl.postTags
+		}
+		if hl.order != "" {
+			highlightS["order"] = hl.order
+		}
+		if hl.encoder != "" {
+			highlightS["encoder"] = hl.encoder
+		}
+		if hl.requireFieldMatch != nil {
+			highlightS["require_field_match"] = *hl.requireFieldMatch
+		}
+		if hl.highlighterType != "" {
+			highlightS["type"] = hl.highlighterType
+		}
+		if hl.fragmenter != "" {
+			highlightS["fragmenter"] = hl.fragmenter
+		}
+		if hl.highlightQuery != nil {
+			highlightS["highlight_query"] = hl.highlightQuery.Source()
+		}
+		if hl.noMatchSize != nil {
+			highlightS["no_match_size"] = *hl.noMatchSize
+		}
+		if len(hl.options) > 0 {
+			highlightS["options"] = hl.options
+		}
+		if hl.forceSource != nil {
+			highlightS["force_source"] = *hl.forceSource
+		}
+		if len(hl.fields) > 0 {
+			fieldsS := make(map[string]interface{})
+			for _, field := range hl.fields {
+				fieldsS[field.Name] = field.Source()
+			}
+			highlightS["fields"] = fieldsS
+		}
+
+		return highlightS
+	*/
+}
+
+// HighlighterField specifies a highlighted field.
+type HighlighterField struct {
+	Name string
+
+	preTags           []string
+	postTags          []string
+	fragmentSize      int
+	fragmentOffset    int
+	numOfFragments    int
+	highlightFilter   *bool
+	order             *string
+	requireFieldMatch *bool
+	boundaryMaxScan   int
+	boundaryChars     []rune
+	highlighterType   *string
+	fragmenter        *string
+	highlightQuery    Query
+	noMatchSize       *int
+	matchedFields     []string
+	phraseLimit       *int
+	options           map[string]interface{}
+	forceSource       *bool
+
+	/*
+		Name              string
+		preTags           []string
+		postTags          []string
+		fragmentSize      int
+		numOfFragments    int
+		fragmentOffset    int
+		highlightFilter   *bool
+		order             string
+		requireFieldMatch *bool
+		boundaryMaxScan   int
+		boundaryChars     []rune
+		highlighterType   string
+		fragmenter        string
+		highlightQuery    Query
+		noMatchSize       *int
+		matchedFields     []string
+		options           map[string]interface{}
+		forceSource       *bool
+	*/
+}
+
+func NewHighlighterField(name string) *HighlighterField {
+	return &HighlighterField{
+		Name:            name,
+		preTags:         make([]string, 0),
+		postTags:        make([]string, 0),
+		fragmentSize:    -1,
+		fragmentOffset:  -1,
+		numOfFragments:  -1,
+		boundaryMaxScan: -1,
+		boundaryChars:   make([]rune, 0),
+		matchedFields:   make([]string, 0),
+		options:         make(map[string]interface{}),
+	}
+}
+
+func (f *HighlighterField) PreTags(preTags ...string) *HighlighterField {
+	f.preTags = make([]string, 0)
+	f.preTags = append(f.preTags, preTags...)
+	return f
+}
+
+func (f *HighlighterField) PostTags(postTags ...string) *HighlighterField {
+	f.postTags = make([]string, 0)
+	f.postTags = append(f.postTags, postTags...)
+	return f
+}
+
+func (f *HighlighterField) FragmentSize(fragmentSize int) *HighlighterField {
+	f.fragmentSize = fragmentSize
+	return f
+}
+
+func (f *HighlighterField) FragmentOffset(fragmentOffset int) *HighlighterField {
+	f.fragmentOffset = fragmentOffset
+	return f
+}
+
+func (f *HighlighterField) NumOfFragments(numOfFragments int) *HighlighterField {
+	f.numOfFragments = numOfFragments
+	return f
+}
+
+func (f *HighlighterField) HighlightFilter(highlightFilter bool) *HighlighterField {
+	f.highlightFilter = &highlightFilter
+	return f
+}
+
+func (f *HighlighterField) Order(order string) *HighlighterField {
+	f.order = &order
+	return f
+}
+
+func (f *HighlighterField) RequireFieldMatch(requireFieldMatch bool) *HighlighterField {
+	f.requireFieldMatch = &requireFieldMatch
+	return f
+}
+
+func (f *HighlighterField) BoundaryMaxScan(boundaryMaxScan int) *HighlighterField {
+	f.boundaryMaxScan = boundaryMaxScan
+	return f
+}
+
+func (f *HighlighterField) BoundaryChars(boundaryChars ...rune) *HighlighterField {
+	f.boundaryChars = make([]rune, 0)
+	f.boundaryChars = append(f.boundaryChars, boundaryChars...)
+	return f
+}
+
+func (f *HighlighterField) HighlighterType(highlighterType string) *HighlighterField {
+	f.highlighterType = &highlighterType
+	return f
+}
+
+func (f *HighlighterField) Fragmenter(fragmenter string) *HighlighterField {
+	f.fragmenter = &fragmenter
+	return f
+}
+
+func (f *HighlighterField) HighlightQuery(highlightQuery Query) *HighlighterField {
+	f.highlightQuery = highlightQuery
+	return f
+}
+
+func (f *HighlighterField) NoMatchSize(noMatchSize int) *HighlighterField {
+	f.noMatchSize = &noMatchSize
+	return f
+}
+
+func (f *HighlighterField) Options(options map[string]interface{}) *HighlighterField {
+	f.options = options
+	return f
+}
+
+func (f *HighlighterField) MatchedFields(matchedFields ...string) *HighlighterField {
+	f.matchedFields = make([]string, 0)
+	f.matchedFields = append(f.matchedFields, matchedFields...)
+	return f
+}
+
+func (f *HighlighterField) PhraseLimit(phraseLimit int) *HighlighterField {
+	f.phraseLimit = &phraseLimit
+	return f
+}
+
+func (f *HighlighterField) ForceSource(forceSource bool) *HighlighterField {
+	f.forceSource = &forceSource
+	return f
+}
+
+func (f *HighlighterField) Source() interface{} {
+	source := make(map[string]interface{})
+
+	if f.preTags != nil && len(f.preTags) > 0 {
+		source["pre_tags"] = f.preTags
+	}
+	if f.postTags != nil && len(f.postTags) > 0 {
+		source["post_tags"] = f.postTags
+	}
+	if f.fragmentSize != -1 {
+		source["fragment_size"] = f.fragmentSize
+	}
+	if f.numOfFragments != -1 {
+		source["number_of_fragments"] = f.numOfFragments
+	}
+	if f.fragmentOffset != -1 {
+		source["fragment_offset"] = f.fragmentOffset
+	}
+	if f.highlightFilter != nil {
+		source["highlight_filter"] = *f.highlightFilter
+	}
+	if f.order != nil {
+		source["order"] = *f.order
+	}
+	if f.requireFieldMatch != nil {
+		source["require_field_match"] = *f.requireFieldMatch
+	}
+	if f.boundaryMaxScan != -1 {
+		source["boundary_max_scan"] = f.boundaryMaxScan
+	}
+	if f.boundaryChars != nil && len(f.boundaryChars) > 0 {
+		source["boundary_chars"] = f.boundaryChars
+	}
+	if f.highlighterType != nil {
+		source["type"] = *f.highlighterType
+	}
+	if f.fragmenter != nil {
+		source["fragmenter"] = *f.fragmenter
+	}
+	if f.highlightQuery != nil {
+		source["highlight_query"] = f.highlightQuery.Source()
+	}
+	if f.noMatchSize != nil {
+		source["no_match_size"] = *f.noMatchSize
+	}
+	if f.matchedFields != nil && len(f.matchedFields) > 0 {
+		source["matched_fields"] = f.matchedFields
+	}
+	if f.phraseLimit != nil {
+		source["phrase_limit"] = *f.phraseLimit
+	}
+	if f.options != nil && len(f.options) > 0 {
+		source["options"] = f.options
+	}
+	if f.forceSource != nil {
+		source["force_source"] = *f.forceSource
+	}
+
+	return source
+}
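
A short sketch of how the Highlight builder above assembles the "highlight" section of a search body (field name, tags, and fragment sizes are placeholders). The returned value is what Search().Highlight(hl) serializes, as exercised in highlight_test.go below:

	func exampleHighlightSource() interface{} {
		// Wrap matches in <em> tags and highlight the "message" field
		// with up to three fragments of at most 20 characters each.
		hl := NewHighlight().
			PreTags("<em>").
			PostTags("</em>").
			Fields(NewHighlighterField("message").FragmentSize(20).NumOfFragments(3))
		return hl.Source()
	}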

+ 168 - 0
sword_base/olivere/elastic.v1/highlight_test.go

@@ -0,0 +1,168 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	_ "net/http"
+	"testing"
+)
+
+func TestHighlighterField(t *testing.T) {
+	field := NewHighlighterField("grade")
+	data, err := json.Marshal(field.Source())
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestHighlighterFieldWithOptions(t *testing.T) {
+	field := NewHighlighterField("grade").FragmentSize(2).NumOfFragments(1)
+	data, err := json.Marshal(field.Source())
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"fragment_size":2,"number_of_fragments":1}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestHighlightWithStringField(t *testing.T) {
+	builder := NewHighlight().Field("grade")
+	data, err := json.Marshal(builder.Source())
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"fields":{"grade":{}}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestHighlightWithFields(t *testing.T) {
+	gradeField := NewHighlighterField("grade")
+	builder := NewHighlight().Fields(gradeField)
+	data, err := json.Marshal(builder.Source())
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"fields":{"grade":{}}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestHighlightWithMultipleFields(t *testing.T) {
+	gradeField := NewHighlighterField("grade")
+	colorField := NewHighlighterField("color")
+	builder := NewHighlight().Fields(gradeField, colorField)
+	data, err := json.Marshal(builder.Source())
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"fields":{"color":{},"grade":{}}}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestHighlighterWithExplicitFieldOrder(t *testing.T) {
+	gradeField := NewHighlighterField("grade").FragmentSize(2)
+	colorField := NewHighlighterField("color").FragmentSize(2).NumOfFragments(1)
+	builder := NewHighlight().Fields(gradeField, colorField).UseExplicitFieldOrder(true)
+	data, err := json.Marshal(builder.Source())
+	if err != nil {
+		t.Fatalf("marshaling to JSON failed: %v", err)
+	}
+	got := string(data)
+	expected := `{"fields":[{"grade":{"fragment_size":2}},{"color":{"fragment_size":2,"number_of_fragments":1}}]}`
+	if got != expected {
+		t.Errorf("expected\n%s\n,got:\n%s", expected, got)
+	}
+}
+
+func TestHighlightWithTermQuery(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+	tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+	tweet3 := tweet{User: "sandrae", Message: "Cycling is fun to do."}
+
+	// Add all documents
+	_, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = client.Flush().Index(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Specify highlighter
+	hl := NewHighlight()
+	hl = hl.Fields(NewHighlighterField("message"))
+	hl = hl.PreTags("<em>").PostTags("</em>")
+
+	// Match all should return all documents
+	query := NewPrefixQuery("message", "golang")
+	searchResult, err := client.Search().
+		Index(testIndexName).
+		Highlight(hl).
+		Query(&query).
+		Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if searchResult.Hits == nil {
+		t.Fatalf("expected SearchResult.Hits != nil; got nil")
+	}
+	if searchResult.Hits.TotalHits != 1 {
+		t.Fatalf("expected SearchResult.Hits.TotalHits = %d; got %d", 1, searchResult.Hits.TotalHits)
+	}
+	if len(searchResult.Hits.Hits) != 1 {
+		t.Fatalf("expected len(SearchResult.Hits.Hits) = %d; got %d", 1, len(searchResult.Hits.Hits))
+	}
+
+	hit := searchResult.Hits.Hits[0]
+	var tw tweet
+	if err := json.Unmarshal(*hit.Source, &tw); err != nil {
+		t.Fatal(err)
+	}
+	if hit.Highlight == nil || len(hit.Highlight) == 0 {
+		t.Fatal("expected hit to have a highlight; got nil")
+	}
+	if hl, found := hit.Highlight["message"]; found {
+		if len(hl) != 1 {
+			t.Fatalf("expected to have one highlight for field \"message\"; got %d", len(hl))
+		}
+		expected := "Welcome to <em>Golang</em> and Elasticsearch."
+		if hl[0] != expected {
+			t.Errorf("expected to have highlight \"%s\"; got \"%s\"", expected, hl[0])
+		}
+	} else {
+		t.Fatal("expected to have a highlight on field \"message\"; got none")
+	}
+}

+ 216 - 0
sword_base/olivere/elastic.v1/index.go

@@ -0,0 +1,216 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
+// IndexResult is the result of indexing a document in Elasticsearch.
+type IndexResult struct {
+	Index   string `json:"_index"`
+	Type    string `json:"_type"`
+	Id      string `json:"_id"`
+	Version int    `json:"_version"`
+	Created bool   `json:"created"`
+}
+
+// IndexService adds documents to Elasticsearch.
+type IndexService struct {
+	client      *Client
+	index       string
+	_type       string
+	id          string
+	routing     string
+	parent      string
+	opType      string
+	refresh     *bool
+	version     *int64
+	versionType string
+	timestamp   string
+	ttl         string
+	timeout     string
+	bodyString  string
+	bodyJson    interface{}
+	pretty      bool
+}
+
+func NewIndexService(client *Client) *IndexService {
+	builder := &IndexService{
+		client: client,
+	}
+	return builder
+}
+
+func (b *IndexService) Index(name string) *IndexService {
+	b.index = name
+	return b
+}
+
+func (b *IndexService) Type(_type string) *IndexService {
+	b._type = _type
+	return b
+}
+
+func (b *IndexService) Id(id string) *IndexService {
+	b.id = id
+	return b
+}
+
+func (b *IndexService) Routing(routing string) *IndexService {
+	b.routing = routing
+	return b
+}
+
+func (b *IndexService) Parent(parent string) *IndexService {
+	b.parent = parent
+	return b
+}
+
+// OpType is either "create" or "index" (the default).
+func (b *IndexService) OpType(opType string) *IndexService {
+	b.opType = opType
+	return b
+}
+
+func (b *IndexService) Refresh(refresh bool) *IndexService {
+	b.refresh = &refresh
+	return b
+}
+
+func (b *IndexService) Version(version int64) *IndexService {
+	b.version = &version
+	return b
+}
+
+// VersionType is either "internal" (default), "external",
+// "external_gt", "external_gte", or "force".
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types
+// for details.
+func (b *IndexService) VersionType(versionType string) *IndexService {
+	b.versionType = versionType
+	return b
+}
+
+func (b *IndexService) Timestamp(timestamp string) *IndexService {
+	b.timestamp = timestamp
+	return b
+}
+
+func (b *IndexService) TTL(ttl string) *IndexService {
+	b.ttl = ttl
+	return b
+}
+
+func (b *IndexService) Timeout(timeout string) *IndexService {
+	b.timeout = timeout
+	return b
+}
+
+func (b *IndexService) BodyString(body string) *IndexService {
+	b.bodyString = body
+	return b
+}
+
+func (b *IndexService) BodyJson(json interface{}) *IndexService {
+	b.bodyJson = json
+	return b
+}
+
+func (b *IndexService) Pretty(pretty bool) *IndexService {
+	b.pretty = pretty
+	return b
+}
+
+func (b *IndexService) Do() (*IndexResult, error) {
+	// Build url
+	var path, method string
+	if b.id != "" {
+		// Create document with manual id
+		method = "PUT"
+		path = "/{index}/{type}/{id}"
+	} else {
+		// Automatic ID generation
+		// See: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/docs-index_.html#index-creation
+		method = "POST"
+		path = "/{index}/{type}/"
+	}
+	path, err := uritemplates.Expand(path, map[string]string{
+		"index": b.index,
+		"type":  b._type,
+		"id":    b.id,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	// Parameters
+	params := make(url.Values)
+	if b.pretty {
+		params.Set("pretty", "true")
+	}
+	if b.routing != "" {
+		params.Set("routing", b.routing)
+	}
+	if b.parent != "" {
+		params.Set("parent", b.parent)
+	}
+	if b.opType != "" {
+		params.Set("op_type", b.opType)
+	}
+	if b.refresh != nil && *b.refresh {
+		params.Set("refresh", "true")
+	}
+	if b.version != nil {
+		params.Set("version", fmt.Sprintf("%d", *b.version))
+	}
+	if b.versionType != "" {
+		params.Set("version_type", b.versionType)
+	}
+	if b.timestamp != "" {
+		params.Set("timestamp", b.timestamp)
+	}
+	if b.ttl != "" {
+		params.Set("ttl", b.ttl)
+	}
+	if b.timeout != "" {
+		params.Set("timeout", b.timeout)
+	}
+
+	/*
+		routing string
+		parent string
+		opType string
+		refresh *bool
+		version *int64
+		versionType string
+		timestamp string
+		ttl string
+	*/
+
+	// Body
+	var body interface{}
+	if b.bodyJson != nil {
+		body = b.bodyJson
+	} else {
+		body = b.bodyString
+	}
+
+	// Get response
+	res, err := b.client.PerformRequest(method, path, params, body)
+	if err != nil {
+		return nil, err
+	}
+	// Return result
+	ret := new(IndexResult)
+	if err := json.Unmarshal(res.Body, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
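
A usage sketch for IndexService (illustrative; index, type, and id are placeholders and doc is any JSON-serializable value). OpType("create") makes the call fail instead of silently overwriting an existing document:

	func exampleIndexDoc(client *Client, doc interface{}) (*IndexResult, error) {
		return client.Index().
			Index("twitter").
			Type("tweet").
			Id("1").
			OpType("create").
			BodyJson(doc).
			Do()
	}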

+ 145 - 0
sword_base/olivere/elastic.v1/index_close.go

@@ -0,0 +1,145 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
+// CloseIndexService closes an index.
+// See documentation at http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/indices-open-close.html.
+type CloseIndexService struct {
+	client            *Client
+	pretty            bool
+	index             string
+	ignoreUnavailable *bool
+	allowNoIndices    *bool
+	expandWildcards   string
+	timeout           string
+	masterTimeout     string
+}
+
+// NewCloseIndexService creates a new CloseIndexService.
+func NewCloseIndexService(client *Client) *CloseIndexService {
+	return &CloseIndexService{client: client}
+}
+
+// Index is the name of the index.
+func (s *CloseIndexService) Index(index string) *CloseIndexService {
+	s.index = index
+	return s
+}
+
+// Timeout is an explicit operation timeout.
+func (s *CloseIndexService) Timeout(timeout string) *CloseIndexService {
+	s.timeout = timeout
+	return s
+}
+
+// MasterTimeout specifies the timeout for connection to master.
+func (s *CloseIndexService) MasterTimeout(masterTimeout string) *CloseIndexService {
+	s.masterTimeout = masterTimeout
+	return s
+}
+
+// IgnoreUnavailable indicates whether specified concrete indices should be
+// ignored when unavailable (missing or closed).
+func (s *CloseIndexService) IgnoreUnavailable(ignoreUnavailable bool) *CloseIndexService {
+	s.ignoreUnavailable = &ignoreUnavailable
+	return s
+}
+
+// AllowNoIndices indicates whether to ignore if a wildcard indices
+// expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified).
+func (s *CloseIndexService) AllowNoIndices(allowNoIndices bool) *CloseIndexService {
+	s.allowNoIndices = &allowNoIndices
+	return s
+}
+
+// ExpandWildcards indicates whether to expand wildcard expression to
+// concrete indices that are open, closed or both.
+func (s *CloseIndexService) ExpandWildcards(expandWildcards string) *CloseIndexService {
+	s.expandWildcards = expandWildcards
+	return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *CloseIndexService) buildURL() (string, url.Values, error) {
+	// Build URL
+	path, err := uritemplates.Expand("/{index}/_close", map[string]string{
+		"index": s.index,
+	})
+	if err != nil {
+		return "", url.Values{}, err
+	}
+
+	// Add query string parameters
+	params := url.Values{}
+	if s.allowNoIndices != nil {
+		params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+	}
+	if s.expandWildcards != "" {
+		params.Set("expand_wildcards", s.expandWildcards)
+	}
+	if s.timeout != "" {
+		params.Set("timeout", s.timeout)
+	}
+	if s.masterTimeout != "" {
+		params.Set("master_timeout", s.masterTimeout)
+	}
+	if s.ignoreUnavailable != nil {
+		params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+	}
+
+	return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *CloseIndexService) Validate() error {
+	var invalid []string
+	if s.index == "" {
+		invalid = append(invalid, "Index")
+	}
+	if len(invalid) > 0 {
+		return fmt.Errorf("missing required fields: %v", invalid)
+	}
+	return nil
+}
+
+// Do executes the operation.
+func (s *CloseIndexService) Do() (*CloseIndexResponse, error) {
+	// Check pre-conditions
+	if err := s.Validate(); err != nil {
+		return nil, err
+	}
+
+	// Get URL for request
+	path, params, err := s.buildURL()
+	if err != nil {
+		return nil, err
+	}
+
+	// Get HTTP response
+	res, err := s.client.PerformRequest("POST", path, params, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return operation response
+	ret := new(CloseIndexResponse)
+	if err := json.Unmarshal(res.Body, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// CloseIndexResponse is the response of CloseIndexService.Do.
+type CloseIndexResponse struct {
+	Acknowledged bool `json:"acknowledged"`
+}
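
An illustrative sketch for closing an index; the service constructor is used directly here since this diff does not show a convenience method on Client for it. The index name is a placeholder and a connected *Client is assumed:

	func exampleCloseIndex(client *Client) (bool, error) {
		res, err := NewCloseIndexService(client).Index("twitter").Do()
		if err != nil {
			return false, err
		}
		// Elasticsearch only reports acknowledgement for close requests.
		return res.Acknowledged, nil
	}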

+ 50 - 0
sword_base/olivere/elastic.v1/index_exists.go

@@ -0,0 +1,50 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"fmt"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
+type IndexExistsService struct {
+	client *Client
+	index  string
+}
+
+func NewIndexExistsService(client *Client) *IndexExistsService {
+	builder := &IndexExistsService{
+		client: client,
+	}
+	return builder
+}
+
+func (b *IndexExistsService) Index(index string) *IndexExistsService {
+	b.index = index
+	return b
+}
+
+func (b *IndexExistsService) Do() (bool, error) {
+	// Build url
+	path, err := uritemplates.Expand("/{index}", map[string]string{
+		"index": b.index,
+	})
+	if err != nil {
+		return false, err
+	}
+
+	// Get response
+	res, err := b.client.PerformRequest("HEAD", path, nil, nil)
+	if err != nil {
+		return false, err
+	}
+	if res.StatusCode == 200 {
+		return true, nil
+	} else if res.StatusCode == 404 {
+		return false, nil
+	}
+	return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode)
+}
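
A minimal sketch of IndexExistsService (placeholder index name, connected *Client assumed); the boolean maps directly onto the 200/404 handling above:

	func exampleIndexExists(client *Client) (bool, error) {
		// true on HTTP 200, false on HTTP 404, error otherwise.
		return NewIndexExistsService(client).Index("twitter").Do()
	}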

+ 186 - 0
sword_base/olivere/elastic.v1/index_get.go

@@ -0,0 +1,186 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"log"
+	"net/url"
+	"strings"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
+var (
+	_ = fmt.Print
+	_ = log.Print
+	_ = strings.Index
+	_ = uritemplates.Expand
+	_ = url.Parse
+)
+
+// IndicesGetService retrieves information about one or more indices.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-get-index.html.
+type IndicesGetService struct {
+	client            *Client
+	pretty            bool
+	index             []string
+	feature           []string
+	expandWildcards   string
+	local             *bool
+	ignoreUnavailable *bool
+	allowNoIndices    *bool
+}
+
+// NewIndicesGetService creates a new IndicesGetService.
+func NewIndicesGetService(client *Client) *IndicesGetService {
+	return &IndicesGetService{
+		client:  client,
+		index:   make([]string, 0),
+		feature: make([]string, 0),
+	}
+}
+
+// Index is a list of index names. Use _all to retrieve information about
+// all indices of a cluster.
+func (s *IndicesGetService) Index(index ...string) *IndicesGetService {
+	s.index = append(s.index, index...)
+	return s
+}
+
+// Feature is a list of features (e.g. _settings,_mappings,_warmers, and _aliases).
+func (s *IndicesGetService) Feature(feature ...string) *IndicesGetService {
+	s.feature = append(s.feature, feature...)
+	return s
+}
+
+// ExpandWildcards indicates whether wildcard expressions should
+// get expanded to open or closed indices (default: open).
+func (s *IndicesGetService) ExpandWildcards(expandWildcards string) *IndicesGetService {
+	s.expandWildcards = expandWildcards
+	return s
+}
+
+// Local indicates whether to return local information only, without
+// retrieving the state from the master node (default: false).
+func (s *IndicesGetService) Local(local bool) *IndicesGetService {
+	s.local = &local
+	return s
+}
+
+// IgnoreUnavailable indicates whether to ignore unavailable indexes (default: false).
+func (s *IndicesGetService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesGetService {
+	s.ignoreUnavailable = &ignoreUnavailable
+	return s
+}
+
+// AllowNoIndices indicates whether to ignore if a wildcard expression
+// resolves to no concrete indices (default: false).
+func (s *IndicesGetService) AllowNoIndices(allowNoIndices bool) *IndicesGetService {
+	s.allowNoIndices = &allowNoIndices
+	return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *IndicesGetService) Pretty(pretty bool) *IndicesGetService {
+	s.pretty = pretty
+	return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IndicesGetService) buildURL() (string, url.Values, error) {
+	var err error
+	var path string
+	var index []string
+
+	if len(s.index) > 0 {
+		index = s.index
+	} else {
+		index = []string{"_all"}
+	}
+
+	if len(s.feature) > 0 {
+		// Build URL
+		path, err = uritemplates.Expand("/{index}/{feature}", map[string]string{
+			"index":   strings.Join(index, ","),
+			"feature": strings.Join(s.feature, ","),
+		})
+	} else {
+		// Build URL
+		path, err = uritemplates.Expand("/{index}", map[string]string{
+			"index": strings.Join(index, ","),
+		})
+	}
+	if err != nil {
+		return "", url.Values{}, err
+	}
+
+	// Add query string parameters
+	params := url.Values{}
+	if s.pretty {
+		params.Set("pretty", "1")
+	}
+	if s.expandWildcards != "" {
+		params.Set("expand_wildcards", s.expandWildcards)
+	}
+	if s.local != nil {
+		params.Set("local", fmt.Sprintf("%v", *s.local))
+	}
+	if s.ignoreUnavailable != nil {
+		params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+	}
+	if s.allowNoIndices != nil {
+		params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+	}
+	return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IndicesGetService) Validate() error {
+	var invalid []string
+	if len(s.index) == 0 {
+		invalid = append(invalid, "Index")
+	}
+	if len(invalid) > 0 {
+		return fmt.Errorf("missing required fields: %v", invalid)
+	}
+	return nil
+}
+
+// Do executes the operation.
+func (s *IndicesGetService) Do() (map[string]*IndicesGetResponse, error) {
+	// Check pre-conditions
+	if err := s.Validate(); err != nil {
+		return nil, err
+	}
+
+	// Get URL for request
+	path, params, err := s.buildURL()
+	if err != nil {
+		return nil, err
+	}
+
+	// Get HTTP response
+	res, err := s.client.PerformRequest("GET", path, params, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return operation response
+	var ret map[string]*IndicesGetResponse
+	if err := json.Unmarshal(res.Body, &ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// IndicesGetResponse is part of the response of IndicesGetService.Do.
+type IndicesGetResponse struct {
+	Aliases  map[string]interface{} `json:"aliases"`
+	Mappings map[string]interface{} `json:"mappings"`
+	Settings map[string]interface{} `json:"settings"`
+	Warmers  map[string]interface{} `json:"warmers"`
+}
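
A sketch of IndicesGetService restricted to selected features (placeholder index name; connected *Client assumed). The response map is keyed by index name, matching IndicesGetResponse above:

	func exampleIndexGet(client *Client) (map[string]interface{}, error) {
		res, err := client.IndexGet().
			Index("twitter").
			Feature("_settings", "_mappings").
			Do()
		if err != nil {
			return nil, err
		}
		if info, found := res["twitter"]; found && info != nil {
			return info.Settings, nil
		}
		return nil, nil
	}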

+ 189 - 0
sword_base/olivere/elastic.v1/index_get_settings.go

@@ -0,0 +1,189 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"log"
+	"net/url"
+	"strings"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
+var (
+	_ = fmt.Print
+	_ = log.Print
+	_ = strings.Index
+	_ = uritemplates.Expand
+	_ = url.Parse
+)
+
+// IndicesGetSettingsService allows to retrieve settings of one
+// or more indices.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/indices-get-settings.html.
+type IndicesGetSettingsService struct {
+	client            *Client
+	pretty            bool
+	index             []string
+	name              []string
+	ignoreUnavailable *bool
+	allowNoIndices    *bool
+	expandWildcards   string
+	flatSettings      *bool
+	local             *bool
+}
+
+// NewIndicesGetSettingsService creates a new IndicesGetSettingsService.
+func NewIndicesGetSettingsService(client *Client) *IndicesGetSettingsService {
+	return &IndicesGetSettingsService{
+		client: client,
+		index:  make([]string, 0),
+		name:   make([]string, 0),
+	}
+}
+
+// Index is a list of index names; use `_all` or empty string to perform the operation on all indices.
+func (s *IndicesGetSettingsService) Index(index ...string) *IndicesGetSettingsService {
+	s.index = append(s.index, index...)
+	return s
+}
+
+// Name are the names of the settings that should be included.
+func (s *IndicesGetSettingsService) Name(name ...string) *IndicesGetSettingsService {
+	s.name = append(s.name, name...)
+	return s
+}
+
+// IgnoreUnavailable indicates whether specified concrete indices should
+// be ignored when unavailable (missing or closed).
+func (s *IndicesGetSettingsService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesGetSettingsService {
+	s.ignoreUnavailable = &ignoreUnavailable
+	return s
+}
+
+// AllowNoIndices indicates whether to ignore if a wildcard indices
+// expression resolves into no concrete indices.
+// (This includes `_all` string or when no indices have been specified).
+func (s *IndicesGetSettingsService) AllowNoIndices(allowNoIndices bool) *IndicesGetSettingsService {
+	s.allowNoIndices = &allowNoIndices
+	return s
+}
+
+// ExpandWildcards indicates whether to expand wildcard expression
+// to concrete indices that are open, closed or both.
+// Options: open, closed, none, all. Default: open,closed.
+func (s *IndicesGetSettingsService) ExpandWildcards(expandWildcards string) *IndicesGetSettingsService {
+	s.expandWildcards = expandWildcards
+	return s
+}
+
+// FlatSettings indicates whether to return settings in flat format (default: false).
+func (s *IndicesGetSettingsService) FlatSettings(flatSettings bool) *IndicesGetSettingsService {
+	s.flatSettings = &flatSettings
+	return s
+}
+
+// Local indicates whether to return local information only, without
+// retrieving the state from the master node (default: false).
+func (s *IndicesGetSettingsService) Local(local bool) *IndicesGetSettingsService {
+	s.local = &local
+	return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *IndicesGetSettingsService) Pretty(pretty bool) *IndicesGetSettingsService {
+	s.pretty = pretty
+	return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IndicesGetSettingsService) buildURL() (string, url.Values, error) {
+	var err error
+	var path string
+	var index []string
+
+	if len(s.index) > 0 {
+		index = s.index
+	} else {
+		index = []string{"_all"}
+	}
+
+	if len(s.name) > 0 {
+		// Build URL
+		path, err = uritemplates.Expand("/{index}/_settings/{name}", map[string]string{
+			"index": strings.Join(index, ","),
+			"name":  strings.Join(s.name, ","),
+		})
+	} else {
+		// Build URL
+		path, err = uritemplates.Expand("/{index}/_settings", map[string]string{
+			"index": strings.Join(index, ","),
+		})
+	}
+	if err != nil {
+		return "", url.Values{}, err
+	}
+
+	// Add query string parameters
+	params := url.Values{}
+	if s.pretty {
+		params.Set("pretty", "1")
+	}
+	if s.ignoreUnavailable != nil {
+		params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+	}
+	if s.allowNoIndices != nil {
+		params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+	}
+	if s.expandWildcards != "" {
+		params.Set("expand_wildcards", s.expandWildcards)
+	}
+	if s.flatSettings != nil {
+		params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
+	}
+	if s.local != nil {
+		params.Set("local", fmt.Sprintf("%v", *s.local))
+	}
+	return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IndicesGetSettingsService) Validate() error {
+	return nil
+}
+
+// Do executes the operation.
+func (s *IndicesGetSettingsService) Do() (map[string]*IndicesGetSettingsResponse, error) {
+	// Check pre-conditions
+	if err := s.Validate(); err != nil {
+		return nil, err
+	}
+
+	// Get URL for request
+	path, params, err := s.buildURL()
+	if err != nil {
+		return nil, err
+	}
+
+	// Get HTTP response
+	res, err := s.client.PerformRequest("GET", path, params, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return operation response
+	var ret map[string]*IndicesGetSettingsResponse
+	if err := json.Unmarshal(res.Body, &ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// IndicesGetSettingsResponse is the response of IndicesGetSettingsService.Do.
+type IndicesGetSettingsResponse struct {
+	Settings map[string]interface{} `json:"settings"`
+}
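
A sketch of IndicesGetSettingsService filtered to a settings pattern (placeholder names; connected *Client assumed), mirroring the URL cases covered in the test file that follows:

	func exampleIndexGetSettings(client *Client) (map[string]interface{}, error) {
		res, err := client.IndexGetSettings().
			Index("twitter").
			Name("index.merge.*").
			Do()
		if err != nil {
			return nil, err
		}
		if info, found := res["twitter"]; found && info != nil {
			return info.Settings, nil
		}
		return nil, nil
	}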

+ 81 - 0
sword_base/olivere/elastic.v1/index_get_settings_test.go

@@ -0,0 +1,81 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"testing"
+)
+
+func TestIndexGetSettingsURL(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	tests := []struct {
+		Indices  []string
+		Names    []string
+		Expected string
+	}{
+		{
+			[]string{},
+			[]string{},
+			"/_all/_settings",
+		},
+		{
+			[]string{},
+			[]string{"index.merge.*"},
+			"/_all/_settings/index.merge.%2A",
+		},
+		{
+			[]string{"twitter-*"},
+			[]string{"index.merge.*", "_settings"},
+			"/twitter-%2A/_settings/index.merge.%2A%2C_settings",
+		},
+		{
+			[]string{"store-1", "store-2"},
+			[]string{"index.merge.*", "_settings"},
+			"/store-1%2Cstore-2/_settings/index.merge.%2A%2C_settings",
+		},
+	}
+
+	for _, test := range tests {
+		path, _, err := client.IndexGetSettings().Index(test.Indices...).Name(test.Names...).buildURL()
+		if err != nil {
+			t.Fatal(err)
+		}
+		if path != test.Expected {
+			t.Errorf("expected %q; got: %q", test.Expected, path)
+		}
+	}
+}
+
+func TestIndexGetSettingsService(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	esversion, err := client.ElasticsearchVersion(DefaultURL)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if esversion < "1.4.0" {
+		t.Skip("Index Get Settings API is available since 1.4")
+		return
+	}
+
+	res, err := client.IndexGetSettings().Index(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if res == nil {
+		t.Fatalf("expected result; got: %v", res)
+	}
+	info, found := res[testIndexName]
+	if !found {
+		t.Fatalf("expected index %q to be found; got: %v", testIndexName, found)
+	}
+	if info == nil {
+		t.Fatalf("expected index %q to be != nil; got: %v", testIndexName, info)
+	}
+	if info.Settings == nil {
+		t.Fatalf("expected index settings of %q to be != nil; got: %v", testIndexName, info.Settings)
+	}
+}

+ 84 - 0
sword_base/olivere/elastic.v1/index_get_test.go

@@ -0,0 +1,84 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"testing"
+)
+
+func TestIndexGetURL(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	tests := []struct {
+		Indices  []string
+		Features []string
+		Expected string
+	}{
+		{
+			[]string{},
+			[]string{},
+			"/_all",
+		},
+		{
+			[]string{},
+			[]string{"_mappings"},
+			"/_all/_mappings",
+		},
+		{
+			[]string{"twitter"},
+			[]string{"_mappings", "_settings"},
+			"/twitter/_mappings%2C_settings",
+		},
+		{
+			[]string{"store-1", "store-2"},
+			[]string{"_mappings", "_settings"},
+			"/store-1%2Cstore-2/_mappings%2C_settings",
+		},
+	}
+
+	for _, test := range tests {
+		path, _, err := client.IndexGet().Index(test.Indices...).Feature(test.Features...).buildURL()
+		if err != nil {
+			t.Fatal(err)
+		}
+		if path != test.Expected {
+			t.Errorf("expected %q; got: %q", test.Expected, path)
+		}
+	}
+}
+
+func TestIndexGetService(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	esversion, err := client.ElasticsearchVersion(DefaultURL)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if esversion < "1.4.0" {
+		t.Skip("Index Get API is available since 1.4")
+		return
+	}
+
+	res, err := client.IndexGet().Index(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if res == nil {
+		t.Fatalf("expected result; got: %v", res)
+	}
+	info, found := res[testIndexName]
+	if !found {
+		t.Fatalf("expected index %q to be found; got: %v", testIndexName, found)
+	}
+	if info == nil {
+		t.Fatalf("expected index %q to be != nil; got: %v", testIndexName, info)
+	}
+	if info.Mappings == nil {
+		t.Errorf("expected mappings to be != nil; got: %v", info.Mappings)
+	}
+	if info.Settings == nil {
+		t.Errorf("expected settings to be != nil; got: %v", info.Settings)
+	}
+}

+ 146 - 0
sword_base/olivere/elastic.v1/index_open.go

@@ -0,0 +1,146 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
+// OpenIndexService opens an index.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/indices-open-close.html.
+type OpenIndexService struct {
+	client            *Client
+	pretty            bool
+	index             string
+	expandWildcards   string
+	timeout           string
+	masterTimeout     string
+	ignoreUnavailable *bool
+	allowNoIndices    *bool
+}
+
+// NewOpenIndexService creates a new OpenIndexService.
+func NewOpenIndexService(client *Client) *OpenIndexService {
+	return &OpenIndexService{client: client}
+}
+
+// Index is the name of the index to open.
+func (s *OpenIndexService) Index(index string) *OpenIndexService {
+	s.index = index
+	return s
+}
+
+// Timeout is an explicit operation timeout.
+func (s *OpenIndexService) Timeout(timeout string) *OpenIndexService {
+	s.timeout = timeout
+	return s
+}
+
+// MasterTimeout specifies the timeout for connection to master.
+func (s *OpenIndexService) MasterTimeout(masterTimeout string) *OpenIndexService {
+	s.masterTimeout = masterTimeout
+	return s
+}
+
+// IgnoreUnavailable indicates whether specified concrete indices should
+// be ignored when unavailable (missing or closed).
+func (s *OpenIndexService) IgnoreUnavailable(ignoreUnavailable bool) *OpenIndexService {
+	s.ignoreUnavailable = &ignoreUnavailable
+	return s
+}
+
+// AllowNoIndices indicates whether to ignore if a wildcard indices
+// expression resolves into no concrete indices.
+// (This includes `_all` string or when no indices have been specified).
+func (s *OpenIndexService) AllowNoIndices(allowNoIndices bool) *OpenIndexService {
+	s.allowNoIndices = &allowNoIndices
+	return s
+}
+
+// ExpandWildcards indicates whether to expand wildcard expression to
+// concrete indices that are open, closed or both.
+func (s *OpenIndexService) ExpandWildcards(expandWildcards string) *OpenIndexService {
+	s.expandWildcards = expandWildcards
+	return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *OpenIndexService) buildURL() (string, url.Values, error) {
+	// Build URL
+	path, err := uritemplates.Expand("/{index}/_open", map[string]string{
+		"index": s.index,
+	})
+	if err != nil {
+		return "", url.Values{}, err
+	}
+
+	// Add query string parameters
+	params := url.Values{}
+	if s.timeout != "" {
+		params.Set("timeout", s.timeout)
+	}
+	if s.masterTimeout != "" {
+		params.Set("master_timeout", s.masterTimeout)
+	}
+	if s.ignoreUnavailable != nil {
+		params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+	}
+	if s.allowNoIndices != nil {
+		params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+	}
+	if s.expandWildcards != "" {
+		params.Set("expand_wildcards", s.expandWildcards)
+	}
+
+	return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *OpenIndexService) Validate() error {
+	var invalid []string
+	if s.index == "" {
+		invalid = append(invalid, "Index")
+	}
+	if len(invalid) > 0 {
+		return fmt.Errorf("missing required fields: %v", invalid)
+	}
+	return nil
+}
+
+// Do executes the operation.
+func (s *OpenIndexService) Do() (*OpenIndexResponse, error) {
+	// Check pre-conditions
+	if err := s.Validate(); err != nil {
+		return nil, err
+	}
+
+	// Get URL for request
+	path, params, err := s.buildURL()
+	if err != nil {
+		return nil, err
+	}
+
+	// Get HTTP response
+	res, err := s.client.PerformRequest("POST", path, params, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return operation response
+	ret := new(OpenIndexResponse)
+	if err := json.Unmarshal(res.Body, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// OpenIndexResponse is the response of OpenIndexService.Do.
+type OpenIndexResponse struct {
+	Acknowledged bool `json:"acknowledged"`
+}
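
A minimal sketch of driving OpenIndexService through the client, assuming the vendored package is importable at the path shown; the package name, index argument, and timeout values are placeholders:

package elasticexamples // hypothetical package name for this sketch

import (
	"fmt"

	elastic "gopkg.in/olivere/elastic.v1" // assumed import path for the vendored copy
)

// openIndex reopens a closed index and returns an error if the master
// does not acknowledge the request.
func openIndex(client *elastic.Client, index string) error {
	res, err := client.OpenIndex(index).Timeout("30s").MasterTimeout("30s").Do()
	if err != nil {
		return err
	}
	if !res.Acknowledged {
		return fmt.Errorf("open of index %q was not acknowledged", index)
	}
	return nil
}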

+ 517 - 0
sword_base/olivere/elastic.v1/index_test.go

@@ -0,0 +1,517 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"os"
+	"testing"
+	"time"
+)
+
+const (
+	testIndexName  = "elastic-test"
+	testIndexName2 = "elastic-test2"
+	testMapping    = `
+{
+	"settings":{
+		"number_of_shards":1,
+		"number_of_replicas":0
+	},
+	"mappings":{
+		"tweet":{
+			"properties":{
+				"tags":{
+					"type":"string"
+				},
+				"location":{
+					"type":"geo_point"
+				},
+				"suggest_field":{
+					"type":"completion",
+					"payloads":true
+				}
+			}
+		}
+	}
+}
+`
+)
+
+type tweet struct {
+	User     string        `json:"user"`
+	Message  string        `json:"message"`
+	Retweets int           `json:"retweets"`
+	Image    string        `json:"image,omitempty"`
+	Created  time.Time     `json:"created,omitempty"`
+	Tags     []string      `json:"tags,omitempty"`
+	Location string        `json:"location,omitempty"`
+	Suggest  *SuggestField `json:"suggest_field,omitempty"`
+}
+
+func (t tweet) String() string {
+	return fmt.Sprintf("tweet{User:%q,Message:%q,Retweets:%d}", t.User, t.Message, t.Retweets)
+}
+
+func isTravis() bool {
+	return os.Getenv("TRAVIS") != ""
+}
+
+func travisGoVersion() string {
+	return os.Getenv("TRAVIS_GO_VERSION")
+}
+
+type logger interface {
+	Error(args ...interface{})
+	Errorf(format string, args ...interface{})
+	Fatal(args ...interface{})
+	Fatalf(format string, args ...interface{})
+	Fail()
+	FailNow()
+	Log(args ...interface{})
+	Logf(format string, args ...interface{})
+}
+
+func setupTestClient(t logger, options ...ClientOptionFunc) (client *Client) {
+	var err error
+
+	client, err = NewClient(options...)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	client.DeleteIndex(testIndexName).Do()
+	client.DeleteIndex(testIndexName2).Do()
+
+	return client
+}
+
+func setupTestClientAndCreateIndex(t logger, options ...ClientOptionFunc) *Client {
+	client := setupTestClient(t, options...)
+
+	// Create index
+	createIndex, err := client.CreateIndex(testIndexName).Body(testMapping).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if createIndex == nil {
+		t.Errorf("expected result to be != nil; got: %v", createIndex)
+	}
+
+	// Create second index
+	createIndex2, err := client.CreateIndex(testIndexName2).Body(testMapping).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if createIndex2 == nil {
+		t.Errorf("expected result to be != nil; got: %v", createIndex2)
+	}
+
+	return client
+}
+
+func setupTestClientAndCreateIndexAndAddDocs(t logger, options ...ClientOptionFunc) *Client {
+	client := setupTestClientAndCreateIndex(t, options...)
+
+	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+	tweet2 := tweet{User: "olivere", Message: "Another unrelated topic."}
+	tweet3 := tweet{User: "sandrae", Message: "Cycling is fun."}
+
+	_, err := client.Index().Index(testIndexName).Type("tweet").Id("1").BodyJson(&tweet1).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("2").BodyJson(&tweet2).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, err = client.Index().Index(testIndexName).Type("tweet").Id("3").BodyJson(&tweet3).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, err = client.Flush().Index(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	return client
+}
+
+func TestIndexLifecycle(t *testing.T) {
+	client := setupTestClient(t)
+
+	// Create index
+	createIndex, err := client.CreateIndex(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !createIndex.Acknowledged {
+		t.Errorf("expected CreateIndexResult.Acknowledged %v; got %v", true, createIndex.Acknowledged)
+	}
+
+	// Check if index exists
+	indexExists, err := client.IndexExists(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !indexExists {
+		t.Fatalf("index %s should exist, but doesn't\n", testIndexName)
+	}
+
+	// Delete index
+	deleteIndex, err := client.DeleteIndex(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !deleteIndex.Acknowledged {
+		t.Errorf("expected DeleteIndexResult.Acknowledged %v; got %v", true, deleteIndex.Acknowledged)
+	}
+
+	// Check if index exists
+	indexExists, err = client.IndexExists(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if indexExists {
+		t.Fatalf("index %s should not exist, but does\n", testIndexName)
+	}
+}
+
+func TestIndexExistScenarios(t *testing.T) {
+	client := setupTestClient(t)
+
+	// Should return false if index does not exist
+	indexExists, err := client.IndexExists(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if indexExists {
+		t.Fatalf("expected index exists to return %v, got %v", false, indexExists)
+	}
+
+	// Create index
+	createIndex, err := client.CreateIndex(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !createIndex.Acknowledged {
+		t.Errorf("expected CreateIndexResult.Ack %v; got %v", true, createIndex.Acknowledged)
+	}
+
+	// Should return true now that the index exists
+	indexExists, err = client.IndexExists(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !indexExists {
+		t.Fatalf("expected index exists to return %v, got %v", true, indexExists)
+	}
+}
+
+// TODO(oe): Find out why this test fails on Travis CI.
+/*
+func TestIndexOpenAndClose(t *testing.T) {
+	client := setupTestClient(t)
+
+	// Create index
+	createIndex, err := client.CreateIndex(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !createIndex.Acknowledged {
+		t.Errorf("expected CreateIndexResult.Acknowledged %v; got %v", true, createIndex.Acknowledged)
+	}
+	defer func() {
+		// Delete index
+		deleteIndex, err := client.DeleteIndex(testIndexName).Do()
+		if err != nil {
+			t.Fatal(err)
+		}
+		if !deleteIndex.Acknowledged {
+			t.Errorf("expected DeleteIndexResult.Acknowledged %v; got %v", true, deleteIndex.Acknowledged)
+		}
+	}()
+
+	waitForYellow := func() {
+		// Wait for status yellow
+		res, err := client.ClusterHealth().WaitForStatus("yellow").Timeout("15s").Do()
+		if err != nil {
+			t.Fatal(err)
+		}
+		if res != nil && res.TimedOut {
+			t.Fatalf("cluster time out waiting for status %q", "yellow")
+		}
+	}
+
+	// Wait for cluster
+	waitForYellow()
+
+	// Close index
+	cresp, err := client.CloseIndex(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !cresp.Acknowledged {
+		t.Fatalf("expected close index of %q to be acknowledged\n", testIndexName)
+	}
+
+	// Wait for cluster
+	waitForYellow()
+
+	// Open index again
+	oresp, err := client.OpenIndex(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !oresp.Acknowledged {
+		t.Fatalf("expected open index of %q to be acknowledged\n", testIndexName)
+	}
+}
+*/
+
+func TestDocumentLifecycle(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+
+	// Add a document
+	indexResult, err := client.Index().
+		Index(testIndexName).
+		Type("tweet").
+		Id("1").
+		BodyJson(&tweet1).
+		Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if indexResult == nil {
+		t.Errorf("expected result to be != nil; got: %v", indexResult)
+	}
+
+	// Exists
+	exists, err := client.Exists().Index(testIndexName).Type("tweet").Id("1").Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !exists {
+		t.Errorf("expected exists %v; got %v", true, exists)
+	}
+
+	// Get document
+	getResult, err := client.Get().
+		Index(testIndexName).
+		Type("tweet").
+		Id("1").
+		Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if getResult.Index != testIndexName {
+		t.Errorf("expected GetResult.Index %q; got %q", testIndexName, getResult.Index)
+	}
+	if getResult.Type != "tweet" {
+		t.Errorf("expected GetResult.Type %q; got %q", "tweet", getResult.Type)
+	}
+	if getResult.Id != "1" {
+		t.Errorf("expected GetResult.Id %q; got %q", "1", getResult.Id)
+	}
+	if getResult.Source == nil {
+		t.Errorf("expected GetResult.Source to be != nil; got nil")
+	}
+
+	// Decode the Source field
+	var tweetGot tweet
+	err = json.Unmarshal(*getResult.Source, &tweetGot)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if tweetGot.User != tweet1.User {
+		t.Errorf("expected Tweet.User to be %q; got %q", tweet1.User, tweetGot.User)
+	}
+	if tweetGot.Message != tweet1.Message {
+		t.Errorf("expected Tweet.Message to be %q; got %q", tweet1.Message, tweetGot.Message)
+	}
+
+	// Delete document again
+	deleteResult, err := client.Delete().Index(testIndexName).Type("tweet").Id("1").Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if deleteResult == nil {
+		t.Errorf("expected result to be != nil; got: %v", deleteResult)
+	}
+
+	// Exists
+	exists, err = client.Exists().Index(testIndexName).Type("tweet").Id("1").Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if exists {
+		t.Errorf("expected exists %v; got %v", false, exists)
+	}
+}
+
+func TestDocumentLifecycleWithAutomaticIDGeneration(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	tweet1 := tweet{User: "olivere", Message: "Welcome to Golang and Elasticsearch."}
+
+	// Add a document
+	indexResult, err := client.Index().
+		Index(testIndexName).
+		Type("tweet").
+		BodyJson(&tweet1).
+		Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if indexResult == nil {
+		t.Errorf("expected result to be != nil; got: %v", indexResult)
+	}
+	if indexResult.Id == "" {
+		t.Fatalf("expected Es to generate an automatic ID, got: %v", indexResult.Id)
+	}
+	id := indexResult.Id
+
+	// Exists
+	exists, err := client.Exists().Index(testIndexName).Type("tweet").Id(id).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !exists {
+		t.Errorf("expected exists %v; got %v", true, exists)
+	}
+
+	// Get document
+	getResult, err := client.Get().
+		Index(testIndexName).
+		Type("tweet").
+		Id(id).
+		Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if getResult.Index != testIndexName {
+		t.Errorf("expected GetResult.Index %q; got %q", testIndexName, getResult.Index)
+	}
+	if getResult.Type != "tweet" {
+		t.Errorf("expected GetResult.Type %q; got %q", "tweet", getResult.Type)
+	}
+	if getResult.Id != id {
+		t.Errorf("expected GetResult.Id %q; got %q", id, getResult.Id)
+	}
+	if getResult.Source == nil {
+		t.Errorf("expected GetResult.Source to be != nil; got nil")
+	}
+
+	// Decode the Source field
+	var tweetGot tweet
+	err = json.Unmarshal(*getResult.Source, &tweetGot)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if tweetGot.User != tweet1.User {
+		t.Errorf("expected Tweet.User to be %q; got %q", tweet1.User, tweetGot.User)
+	}
+	if tweetGot.Message != tweet1.Message {
+		t.Errorf("expected Tweet.Message to be %q; got %q", tweet1.Message, tweetGot.Message)
+	}
+
+	// Delete document again
+	deleteResult, err := client.Delete().Index(testIndexName).Type("tweet").Id(id).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if deleteResult == nil {
+		t.Errorf("expected result to be != nil; got: %v", deleteResult)
+	}
+
+	// Exists
+	exists, err = client.Exists().Index(testIndexName).Type("tweet").Id(id).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if exists {
+		t.Errorf("expected exists %v; got %v", false, exists)
+	}
+}
+
+func TestIndexCreateExistsOpenCloseDelete(t *testing.T) {
+	// TODO: Find out how to make these test robust
+	t.Skip("test fails regularly with 409 (Conflict): " +
+		"IndexPrimaryShardNotAllocatedException[[elastic-test] " +
+		"primary not allocated post api... skipping")
+
+	client := setupTestClient(t)
+
+	// Create index
+	createIndex, err := client.CreateIndex(testIndexName).Body(testMapping).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if createIndex == nil {
+		t.Fatalf("expected response; got: %v", createIndex)
+	}
+	if !createIndex.Acknowledged {
+		t.Errorf("expected ack for creating index; got: %v", createIndex.Acknowledged)
+	}
+
+	// Exists
+	indexExists, err := client.IndexExists(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !indexExists {
+		t.Fatalf("expected index exists=%v; got %v", true, indexExists)
+	}
+
+	// Flush
+	_, err = client.Flush().Index(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Close index
+	closeIndex, err := client.CloseIndex(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if closeIndex == nil {
+		t.Fatalf("expected response; got: %v", closeIndex)
+	}
+	if !closeIndex.Acknowledged {
+		t.Errorf("expected ack for closing index; got: %v", closeIndex.Acknowledged)
+	}
+
+	// Open index
+	openIndex, err := client.OpenIndex(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if openIndex == nil {
+		t.Fatalf("expected response; got: %v", openIndex)
+	}
+	if !openIndex.Acknowledged {
+		t.Errorf("expected ack for opening index; got: %v", openIndex.Acknowledged)
+	}
+
+	// Flush
+	_, err = client.Flush().Index(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Delete index
+	deleteIndex, err := client.DeleteIndex(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if deleteIndex == nil {
+		t.Fatalf("expected response; got: %v", deleteIndex)
+	}
+	if !deleteIndex.Acknowledged {
+		t.Errorf("expected ack for deleting index; got %v", deleteIndex.Acknowledged)
+	}
+}

+ 122 - 0
sword_base/olivere/elastic.v1/indices_delete_template.go

@@ -0,0 +1,122 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
+// IndicesDeleteTemplateService deletes index templates.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/indices-templates.html.
+type IndicesDeleteTemplateService struct {
+	client        *Client
+	pretty        bool
+	name          string
+	timeout       string
+	masterTimeout string
+}
+
+// NewIndicesDeleteTemplateService creates a new IndicesDeleteTemplateService.
+func NewIndicesDeleteTemplateService(client *Client) *IndicesDeleteTemplateService {
+	return &IndicesDeleteTemplateService{
+		client: client,
+	}
+}
+
+// Name is the name of the template.
+func (s *IndicesDeleteTemplateService) Name(name string) *IndicesDeleteTemplateService {
+	s.name = name
+	return s
+}
+
+// Timeout is an explicit operation timeout.
+func (s *IndicesDeleteTemplateService) Timeout(timeout string) *IndicesDeleteTemplateService {
+	s.timeout = timeout
+	return s
+}
+
+// MasterTimeout specifies the timeout for connection to master.
+func (s *IndicesDeleteTemplateService) MasterTimeout(masterTimeout string) *IndicesDeleteTemplateService {
+	s.masterTimeout = masterTimeout
+	return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *IndicesDeleteTemplateService) Pretty(pretty bool) *IndicesDeleteTemplateService {
+	s.pretty = pretty
+	return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IndicesDeleteTemplateService) buildURL() (string, url.Values, error) {
+	// Build URL
+	path, err := uritemplates.Expand("/_template/{name}", map[string]string{
+		"name": s.name,
+	})
+	if err != nil {
+		return "", url.Values{}, err
+	}
+
+	// Add query string parameters
+	params := url.Values{}
+	if s.pretty {
+		params.Set("pretty", "1")
+	}
+	if s.timeout != "" {
+		params.Set("timeout", s.timeout)
+	}
+	if s.masterTimeout != "" {
+		params.Set("master_timeout", s.masterTimeout)
+	}
+	return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IndicesDeleteTemplateService) Validate() error {
+	var invalid []string
+	if s.name == "" {
+		invalid = append(invalid, "Name")
+	}
+	if len(invalid) > 0 {
+		return fmt.Errorf("missing required fields: %v", invalid)
+	}
+	return nil
+}
+
+// Do executes the operation.
+func (s *IndicesDeleteTemplateService) Do() (*IndicesDeleteTemplateResponse, error) {
+	// Check pre-conditions
+	if err := s.Validate(); err != nil {
+		return nil, err
+	}
+
+	// Get URL for request
+	path, params, err := s.buildURL()
+	if err != nil {
+		return nil, err
+	}
+
+	// Get HTTP response
+	res, err := s.client.PerformRequest("DELETE", path, params, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return operation response
+	ret := new(IndicesDeleteTemplateResponse)
+	if err := json.Unmarshal(res.Body, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// IndicesDeleteTemplateResponse is the response of IndicesDeleteTemplateService.Do.
+type IndicesDeleteTemplateResponse struct {
+	Acknowledged bool `json:"acknowledged,omitempty"`
+}
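
A minimal sketch of removing an index template via IndicesDeleteTemplateService, under the same assumptions as the earlier sketches (hypothetical package name, assumed import path, caller-supplied template name):

package elasticexamples // hypothetical package name for this sketch

import (
	"fmt"

	elastic "gopkg.in/olivere/elastic.v1" // assumed import path for the vendored copy
)

// deleteTemplate removes an index template previously registered with
// IndexPutTemplate; the template name is supplied by the caller.
func deleteTemplate(client *elastic.Client, name string) error {
	res, err := client.IndexDeleteTemplate(name).Do()
	if err != nil {
		return err
	}
	if !res.Acknowledged {
		return fmt.Errorf("delete of template %q was not acknowledged", name)
	}
	return nil
}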

+ 107 - 0
sword_base/olivere/elastic.v1/indices_exists_template.go

@@ -0,0 +1,107 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"fmt"
+	"net/url"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
+// IndicesExistsTemplateService checks if a given template exists.
+// See http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-templates.html#indices-templates-exists
+// for documentation.
+type IndicesExistsTemplateService struct {
+	client *Client
+	pretty bool
+	name   string
+	local  *bool
+}
+
+// NewIndicesExistsTemplateService creates a new IndicesExistsTemplateService.
+func NewIndicesExistsTemplateService(client *Client) *IndicesExistsTemplateService {
+	return &IndicesExistsTemplateService{
+		client: client,
+	}
+}
+
+// Name is the name of the template.
+func (s *IndicesExistsTemplateService) Name(name string) *IndicesExistsTemplateService {
+	s.name = name
+	return s
+}
+
+// Local indicates whether to return local information, i.e. do not retrieve
+// the state from master node (default: false).
+func (s *IndicesExistsTemplateService) Local(local bool) *IndicesExistsTemplateService {
+	s.local = &local
+	return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *IndicesExistsTemplateService) Pretty(pretty bool) *IndicesExistsTemplateService {
+	s.pretty = pretty
+	return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IndicesExistsTemplateService) buildURL() (string, url.Values, error) {
+	// Build URL
+	path, err := uritemplates.Expand("/_template/{name}", map[string]string{
+		"name": s.name,
+	})
+	if err != nil {
+		return "", url.Values{}, err
+	}
+
+	// Add query string parameters
+	params := url.Values{}
+	if s.pretty {
+		params.Set("pretty", "1")
+	}
+	if s.local != nil {
+		params.Set("local", fmt.Sprintf("%v", *s.local))
+	}
+	return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IndicesExistsTemplateService) Validate() error {
+	var invalid []string
+	if s.name == "" {
+		invalid = append(invalid, "Name")
+	}
+	if len(invalid) > 0 {
+		return fmt.Errorf("missing required fields: %v", invalid)
+	}
+	return nil
+}
+
+// Do executes the operation.
+func (s *IndicesExistsTemplateService) Do() (bool, error) {
+	// Check pre-conditions
+	if err := s.Validate(); err != nil {
+		return false, err
+	}
+
+	// Get URL for request
+	path, params, err := s.buildURL()
+	if err != nil {
+		return false, err
+	}
+
+	// Get HTTP response
+	res, err := s.client.PerformRequest("HEAD", path, params, nil)
+	if err != nil {
+		return false, err
+	}
+	if res.StatusCode == 200 {
+		return true, nil
+	} else if res.StatusCode == 404 {
+		return false, nil
+	}
+	return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode)
+}
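
A minimal sketch of the template-existence check, assuming the same hypothetical package and import path as above; note that the service issues HEAD /_template/{name} and maps 200 to true and 404 to false:

package elasticexamples // hypothetical package name for this sketch

import (
	elastic "gopkg.in/olivere/elastic.v1" // assumed import path for the vendored copy
)

// hasTemplate reports whether an index template with the given name exists.
func hasTemplate(client *elastic.Client, name string) (bool, error) {
	return client.IndexTemplateExists(name).Do()
}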

+ 68 - 0
sword_base/olivere/elastic.v1/indices_exists_template_test.go

@@ -0,0 +1,68 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"testing"
+)
+
+func TestIndexExistsTemplate(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	tmpl := `{
+	"template":"elastic-test*",
+	"settings":{
+		"number_of_shards":1,
+		"number_of_replicas":0
+	},
+	"mappings":{
+		"tweet":{
+			"properties":{
+				"tags":{
+					"type":"string"
+				},
+				"location":{
+					"type":"geo_point"
+				},
+				"suggest_field":{
+					"type":"completion",
+					"payloads":true
+				}
+			}
+		}
+	}
+}`
+	putres, err := client.IndexPutTemplate("elastic-template").BodyString(tmpl).Do()
+	if err != nil {
+		t.Fatalf("expected no error; got: %v", err)
+	}
+	if putres == nil {
+		t.Fatalf("expected response; got: %v", putres)
+	}
+	if !putres.Acknowledged {
+		t.Fatalf("expected index template to be ack'd; got: %v", putres.Acknowledged)
+	}
+
+	// Always delete template
+	defer client.IndexDeleteTemplate("elastic-template").Do()
+
+	// Check if template exists
+	exists, err := client.IndexTemplateExists("elastic-template").Do()
+	if err != nil {
+		t.Fatalf("expected no error; got: %v", err)
+	}
+	if !exists {
+		t.Fatalf("expected index template %q to exist; got: %v", "elastic-template", exists)
+	}
+
+	// Get template
+	getres, err := client.IndexGetTemplate("elastic-template").Do()
+	if err != nil {
+		t.Fatalf("expected no error; got: %v", err)
+	}
+	if getres == nil {
+		t.Fatalf("expected to get index template %q; got: %v", "elastic-template", getres)
+	}
+}

+ 155 - 0
sword_base/olivere/elastic.v1/indices_exists_type.go

@@ -0,0 +1,155 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"fmt"
+	"net/url"
+	"strings"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
+// IndicesExistsTypeService checks if one or more types exist in one or more indices.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-types-exists.html.
+type IndicesExistsTypeService struct {
+	client            *Client
+	pretty            bool
+	index             []string
+	typ               []string
+	allowNoIndices    *bool
+	expandWildcards   string
+	local             *bool
+	ignoreUnavailable *bool
+}
+
+// NewIndicesExistsTypeService creates a new IndicesExistsTypeService.
+func NewIndicesExistsTypeService(client *Client) *IndicesExistsTypeService {
+	return &IndicesExistsTypeService{
+		client: client,
+		index:  make([]string, 0),
+		typ:    make([]string, 0),
+	}
+}
+
+// Index is a list of index names; use `_all` to check the types across all indices.
+func (s *IndicesExistsTypeService) Index(index ...string) *IndicesExistsTypeService {
+	s.index = append(s.index, index...)
+	return s
+}
+
+// Type is a list of document types to check.
+func (s *IndicesExistsTypeService) Type(typ ...string) *IndicesExistsTypeService {
+	s.typ = append(s.typ, typ...)
+	return s
+}
+
+// IgnoreUnavailable indicates whether specified concrete indices should be
+// ignored when unavailable (missing or closed).
+func (s *IndicesExistsTypeService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesExistsTypeService {
+	s.ignoreUnavailable = &ignoreUnavailable
+	return s
+}
+
+// AllowNoIndices indicates whether to ignore if a wildcard indices
+// expression resolves into no concrete indices.
+// (This includes `_all` string or when no indices have been specified).
+func (s *IndicesExistsTypeService) AllowNoIndices(allowNoIndices bool) *IndicesExistsTypeService {
+	s.allowNoIndices = &allowNoIndices
+	return s
+}
+
+// ExpandWildcards indicates whether to expand wildcard expression to
+// concrete indices that are open, closed or both.
+func (s *IndicesExistsTypeService) ExpandWildcards(expandWildcards string) *IndicesExistsTypeService {
+	s.expandWildcards = expandWildcards
+	return s
+}
+
+// Local specifies whether to return local information, i.e. do not retrieve
+// the state from master node (default: false).
+func (s *IndicesExistsTypeService) Local(local bool) *IndicesExistsTypeService {
+	s.local = &local
+	return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *IndicesExistsTypeService) Pretty(pretty bool) *IndicesExistsTypeService {
+	s.pretty = pretty
+	return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IndicesExistsTypeService) buildURL() (string, url.Values, error) {
+	if err := s.Validate(); err != nil {
+		return "", url.Values{}, err
+	}
+
+	// Build URL
+	path, err := uritemplates.Expand("/{index}/{type}", map[string]string{
+		"type":  strings.Join(s.typ, ","),
+		"index": strings.Join(s.index, ","),
+	})
+	if err != nil {
+		return "", url.Values{}, err
+	}
+
+	// Add query string parameters
+	params := url.Values{}
+	if s.pretty {
+		params.Set("pretty", "1")
+	}
+	if s.expandWildcards != "" {
+		params.Set("expand_wildcards", s.expandWildcards)
+	}
+	if s.local != nil {
+		params.Set("local", fmt.Sprintf("%v", *s.local))
+	}
+	if s.ignoreUnavailable != nil {
+		params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
+	}
+	if s.allowNoIndices != nil {
+		params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
+	}
+	return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IndicesExistsTypeService) Validate() error {
+	var invalid []string
+	if len(s.index) == 0 {
+		invalid = append(invalid, "Index")
+	}
+	if len(s.typ) == 0 {
+		invalid = append(invalid, "Type")
+	}
+	if len(invalid) > 0 {
+		return fmt.Errorf("missing required fields: %v", invalid)
+	}
+	return nil
+}
+
+// Do executes the operation.
+func (s *IndicesExistsTypeService) Do() (bool, error) {
+	// Get URL for request
+	path, params, err := s.buildURL()
+	if err != nil {
+		return false, err
+	}
+
+	// Get HTTP response
+	res, err := s.client.PerformRequest("HEAD", path, params, nil)
+	if err != nil {
+		return false, err
+	}
+
+	// Return operation response
+	if res.StatusCode == 200 {
+		return true, nil
+	} else if res.StatusCode == 404 {
+		return false, nil
+	}
+	return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode)
+}
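
A minimal sketch of the type-existence check, again under the assumed package name and import path; both the type and index arguments are placeholders supplied by the caller:

package elasticexamples // hypothetical package name for this sketch

import (
	elastic "gopkg.in/olivere/elastic.v1" // assumed import path for the vendored copy
)

// hasType reports whether the given mapping type exists in all of the
// listed indices (HEAD /{index}/{type}: 200 means true, 404 means false).
func hasType(client *elastic.Client, typ string, indices ...string) (bool, error) {
	return client.TypeExists().Index(indices...).Type(typ).Do()
}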

+ 121 - 0
sword_base/olivere/elastic.v1/indices_exists_type_test.go

@@ -0,0 +1,121 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"testing"
+)
+
+func TestTypeExistsBuildURL(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	tests := []struct {
+		Indices               []string
+		Types                 []string
+		Expected              string
+		ExpectValidateFailure bool
+	}{
+		{
+			[]string{},
+			[]string{},
+			"",
+			true,
+		},
+		{
+			[]string{"index1"},
+			[]string{},
+			"",
+			true,
+		},
+		{
+			[]string{},
+			[]string{"type1"},
+			"",
+			true,
+		},
+		{
+			[]string{"index1"},
+			[]string{"type1"},
+			"/index1/type1",
+			false,
+		},
+		{
+			[]string{"index1", "index2"},
+			[]string{"type1"},
+			"/index1%2Cindex2/type1",
+			false,
+		},
+		{
+			[]string{"index1", "index2"},
+			[]string{"type1", "type2"},
+			"/index1%2Cindex2/type1%2Ctype2",
+			false,
+		},
+	}
+
+	for i, test := range tests {
+		err := client.TypeExists().Index(test.Indices...).Type(test.Types...).Validate()
+		if err == nil && test.ExpectValidateFailure {
+			t.Errorf("case #%d: expected validate to fail", i+1)
+			continue
+		}
+		if err != nil && !test.ExpectValidateFailure {
+			t.Errorf("case #%d: expected validate to succeed", i+1)
+			continue
+		}
+		if !test.ExpectValidateFailure {
+			path, _, err := client.TypeExists().Index(test.Indices...).Type(test.Types...).buildURL()
+			if err != nil {
+				t.Fatalf("case #%d: %v", i+1, err)
+			}
+			if path != test.Expected {
+				t.Errorf("case #%d: expected %q; got: %q", i+1, test.Expected, path)
+			}
+		}
+	}
+}
+
+func TestTypeExists(t *testing.T) {
+	client := setupTestClient(t)
+
+	// Create index with tweet type
+	createIndex, err := client.CreateIndex(testIndexName).Body(testMapping).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if createIndex == nil {
+		t.Errorf("expected result to be != nil; got: %v", createIndex)
+	}
+	if !createIndex.Acknowledged {
+		t.Errorf("expected CreateIndexResult.Acknowledged %v; got %v", true, createIndex.Acknowledged)
+	}
+
+	// Check if type exists
+	exists, err := client.TypeExists().Index(testIndexName).Type("tweet").Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !exists {
+		t.Fatalf("type %s should exist in index %s, but doesn't\n", "tweet", testIndexName)
+	}
+
+	// Delete index
+	deleteIndex, err := client.DeleteIndex(testIndexName).Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !deleteIndex.Acknowledged {
+		t.Errorf("expected DeleteIndexResult.Acknowledged %v; got %v", true, deleteIndex.Acknowledged)
+	}
+
+	// Check if type exists
+	exists, err = client.TypeExists().Index(testIndexName).Type("tweet").Do()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if exists {
+		t.Fatalf("type %s should not exist in index %s, but it does\n", "tweet", testIndexName)
+	}
+}

+ 128 - 0
sword_base/olivere/elastic.v1/indices_get_template.go

@@ -0,0 +1,128 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+	"strings"
+
+	"github.com/olivere/elastic/uritemplates"
+)
+
+// IndicesGetTemplateService returns an index template.
+// See http://www.elasticsearch.org/guide/en/elasticsearch/reference/1.4/indices-templates.html.
+type IndicesGetTemplateService struct {
+	client       *Client
+	pretty       bool
+	name         []string
+	flatSettings *bool
+	local        *bool
+}
+
+// NewIndicesGetTemplateService creates a new IndicesGetTemplateService.
+func NewIndicesGetTemplateService(client *Client) *IndicesGetTemplateService {
+	return &IndicesGetTemplateService{
+		client: client,
+		name:   make([]string, 0),
+	}
+}
+
+// Name is the name of the index template.
+func (s *IndicesGetTemplateService) Name(name ...string) *IndicesGetTemplateService {
+	s.name = append(s.name, name...)
+	return s
+}
+
+// FlatSettings indicates whether to return settings in flat format (default: false).
+func (s *IndicesGetTemplateService) FlatSettings(flatSettings bool) *IndicesGetTemplateService {
+	s.flatSettings = &flatSettings
+	return s
+}
+
+// Local indicates whether to return local information, i.e. do not retrieve
+// the state from master node (default: false).
+func (s *IndicesGetTemplateService) Local(local bool) *IndicesGetTemplateService {
+	s.local = &local
+	return s
+}
+
+// Pretty indicates that the JSON response be indented and human readable.
+func (s *IndicesGetTemplateService) Pretty(pretty bool) *IndicesGetTemplateService {
+	s.pretty = pretty
+	return s
+}
+
+// buildURL builds the URL for the operation.
+func (s *IndicesGetTemplateService) buildURL() (string, url.Values, error) {
+	// Build URL
+	var err error
+	var path string
+	if len(s.name) > 0 {
+		path, err = uritemplates.Expand("/_template/{name}", map[string]string{
+			"name": strings.Join(s.name, ","),
+		})
+	} else {
+		path = "/_template"
+	}
+	if err != nil {
+		return "", url.Values{}, err
+	}
+
+	// Add query string parameters
+	params := url.Values{}
+	if s.pretty {
+		params.Set("pretty", "1")
+	}
+	if s.flatSettings != nil {
+		params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
+	}
+	if s.local != nil {
+		params.Set("local", fmt.Sprintf("%v", *s.local))
+	}
+	return path, params, nil
+}
+
+// Validate checks if the operation is valid.
+func (s *IndicesGetTemplateService) Validate() error {
+	return nil
+}
+
+// Do executes the operation.
+func (s *IndicesGetTemplateService) Do() (map[string]*IndicesGetTemplateResponse, error) {
+	// Check pre-conditions
+	if err := s.Validate(); err != nil {
+		return nil, err
+	}
+
+	// Get URL for request
+	path, params, err := s.buildURL()
+	if err != nil {
+		return nil, err
+	}
+
+	// Get HTTP response
+	res, err := s.client.PerformRequest("GET", path, params, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return operation response
+	var ret map[string]*IndicesGetTemplateResponse
+	if err := json.Unmarshal(res.Body, &ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// IndicesGetTemplateResponse is the response of IndicesGetTemplateService.Do.
+type IndicesGetTemplateResponse struct {
+	Order    int                    `json:"order,omitempty"`
+	Template string                 `json:"template,omitempty"`
+	Settings map[string]interface{} `json:"settings,omitempty"`
+	Mappings map[string]interface{} `json:"mappings,omitempty"`
+	Aliases  map[string]interface{} `json:"aliases,omitempty"`
+}
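
A minimal sketch of fetching index templates, under the same assumptions as the previous sketches; with no names the service returns every template registered on the cluster:

package elasticexamples // hypothetical package name for this sketch

import (
	"fmt"

	elastic "gopkg.in/olivere/elastic.v1" // assumed import path for the vendored copy
)

// printTemplates fetches one or more index templates (all templates when no
// name is given) and prints the index pattern each one applies to.
func printTemplates(client *elastic.Client, names ...string) error {
	res, err := client.IndexGetTemplate(names...).Do()
	if err != nil {
		return err
	}
	for name, tmpl := range res {
		fmt.Printf("%s applies to %q (order %d)\n", name, tmpl.Template, tmpl.Order)
	}
	return nil
}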

+ 41 - 0
sword_base/olivere/elastic.v1/indices_get_template_test.go

@@ -0,0 +1,41 @@
+// Copyright 2012-2015 Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+import (
+	"testing"
+)
+
+func TestIndexGetTemplateURL(t *testing.T) {
+	client := setupTestClientAndCreateIndex(t)
+
+	tests := []struct {
+		Names    []string
+		Expected string
+	}{
+		{
+			[]string{},
+			"/_template",
+		},
+		{
+			[]string{"index1"},
+			"/_template/index1",
+		},
+		{
+			[]string{"index1", "index2"},
+			"/_template/index1%2Cindex2",
+		},
+	}
+
+	for _, test := range tests {
+		path, _, err := client.IndexGetTemplate().Name(test.Names...).buildURL()
+		if err != nil {
+			t.Fatal(err)
+		}
+		if path != test.Expected {
+			t.Errorf("expected %q; got: %q", test.Expected, path)
+		}
+	}
+}

Some files were not shown because too many files changed in this diff