Browse Source

rediscluster

张金坤 8 years ago
parent
commit
f5e8ca95fb

+ 24 - 0
common/src/github.com/jettyu/goredis/.gitignore

@@ -0,0 +1,24 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof

+ 21 - 0
common/src/github.com/jettyu/goredis/LICENSE

@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2016 jettyu
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.

+ 2 - 0
common/src/github.com/jettyu/goredis/README.md

@@ -0,0 +1,2 @@
+# goredis
+A Redis client for Go based on redigo, with a rewritten connection pool and Redis Cluster support.

+ 24 - 0
common/src/github.com/jettyu/goredis/command.go

@@ -0,0 +1,24 @@
+package goredis
+
// Command bundles a Redis command name with its arguments so it can be
// built up incrementally and queued before being sent.
type Command struct {
	CommandName string
	Args        []interface{}
}

// NewCommand builds a Command from a name and its arguments.
func NewCommand(commandName string, args ...interface{}) (command Command) {
	command = Command{CommandName: commandName, Args: args}
	return command
}

// Append returns a copy of the command with one more argument attached.
func (cmd Command) Append(args interface{}) Command {
	cmd.Args = append(cmd.Args, args)
	return cmd
}

// Commands is an ordered batch of Command values.
type Commands []Command

// Append returns the batch extended with command.
func (cmds Commands) Append(command Command) Commands {
	return append(cmds, command)
}

+ 70 - 0
common/src/github.com/jettyu/goredis/conn.go

@@ -0,0 +1,70 @@
+package goredis
+
import (
	"sync/atomic"

	"github.com/garyburd/redigo/redis"
)
+
// RedisConn wraps a redigo connection together with the pool it was
// borrowed from. A RedisConn with err set (and pool nil) is an error
// sentinel handed out by Pool.Get when no real connection is available.
type RedisConn struct {
	conn redis.Conn // underlying redigo connection (nil on sentinels)
	pool *Pool      // owning pool; nil once detached or for sentinels
	err  error      // sticky wrapper-level error; short-circuits all ops
	//	status int32
}
+
+func (this *RedisConn) Close() error {
+	if this.err != nil || this.pool == nil {
+		return nil
+	} else if this.conn != nil {
+		if this.conn.Err() != nil {
+			return this.conn.Close()
+		} else {
+			this.pool.Put(this)
+		}
+	}
+
+	return nil
+}
+
+func (this *RedisConn) Command(commandName string, args ...interface{}) *RedisReply {
+	reply, err := this.Do(commandName, args...)
+	return NewRedisReply(reply, err)
+}
+
+func (this *RedisConn) Conn() redis.Conn {
+	return this.conn
+}
+
+func (this *RedisConn) Err() error {
+	if this.err != nil {
+		return this.err
+	}
+	return this.conn.Err()
+}
+
+func (this *RedisConn) Do(commandName string, args ...interface{}) (reply interface{}, err error) {
+	if this.err != nil {
+		return nil, this.err
+	}
+	return this.conn.Do(commandName, args...)
+}
+
+func (this *RedisConn) Send(commandName string, args ...interface{}) error {
+	if this.err != nil {
+		return this.err
+	}
+	return this.conn.Send(commandName, args...)
+}
+
+func (this *RedisConn) Flush() error {
+	if this.err != nil {
+		return this.err
+	}
+	return this.conn.Flush()
+}
+
+func (this *RedisConn) Receive() (reply interface{}, err error) {
+	if this.err != nil {
+		return nil, this.err
+	}
+	return this.conn.Receive()
+}

+ 62 - 0
common/src/github.com/jettyu/goredis/crc16.go

@@ -0,0 +1,62 @@
+package goredis
+
+/*
+ * Note by @antirez: this is actually the XMODEM CRC 16 algorithm, using the
+ * following parameters:
+ *
+ * Name                       : "XMODEM", also known as "ZMODEM", "CRC-16/ACORN"
+ * Width                      : 16 bit
+ * Poly                       : 1021 (That is actually x^16 + x^12 + x^5 + 1)
+ * Initialization             : 0000
+ * Reflect Input byte         : False
+ * Reflect Output CRC         : False
+ * Xor constant to output CRC : 0000
+ * Output for "123456789"     : 31C3
+ *
+ * See Bottom of: http://redis.io/topics/cluster-spec
+ */
+
// Table is a 256-word table representing the polynomial for efficient processing.
type Table [256]uint16

// RedisTable is the precomputed CRC-16/XMODEM table used by Redis
// Cluster for key-slot hashing.
var RedisTable = Table{
	0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7,
	0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef,
	0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6,
	0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de,
	0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485,
	0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d,
	0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4,
	0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc,
	0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823,
	0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b,
	0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12,
	0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a,
	0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41,
	0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49,
	0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70,
	0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78,
	0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f,
	0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067,
	0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e,
	0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256,
	0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d,
	0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
	0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c,
	0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634,
	0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab,
	0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3,
	0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a,
	0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92,
	0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9,
	0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1,
	0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8,
	0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0}

// ChecksumCRC16 returns the CRC-16/XMODEM checksum of data — the hash
// Redis Cluster uses for slot mapping ("123456789" -> 0x31C3, per the
// header comment above).
func ChecksumCRC16(data []byte) uint16 {
	var crc uint16
	for _, b := range data {
		// crc is uint16, so the shift wraps naturally; no masking needed.
		crc = crc<<8 ^ RedisTable[byte(crc>>8)^b]
	}
	return crc
}

+ 49 - 0
common/src/github.com/jettyu/goredis/example/example.go

@@ -0,0 +1,49 @@
+package main
+
+import (
+	"fmt"
+	redis "github.com/garyburd/redigo/redis"
+	"github.com/jettyu/goredis"
+)
+
+func TestPool() {
+	ri := goredis.NewPool(func() (redis.Conn, error) {
+		c, err := redis.Dial("tcp", "127.0.0.1:6379")
+		fmt.Println("Dial ...")
+		if err != nil {
+			fmt.Println(err)
+		}
+		return c, err
+	},
+		8,
+		8)
+	conn := ri.Get()
+	defer conn.Close()
+	reply, err := conn.Do("SET", "test", "hello")
+	fmt.Println(reply, err)
+	reply, err = ri.Do("GET", "test")
+	fmt.Println(reply, err)
+	rp := conn.Command("GET", "test")
+	fmt.Println(rp)
+}
+
+func TestCluster() {
+	redishosts := []string{
+		"127.0.0.1:6380",
+		"127.0.0.1:6381",
+		"127.0.0.1:6382",
+		"127.0.0.1:6383",
+		"127.0.0.1:6384",
+		"127.0.0.1:6385",
+	}
+	ri := goredis.NewRedisCluster(redishosts, 8, 64, true)
+	reply, err := ri.Do("SET", "test", "hello")
+	fmt.Println(reply, err)
+	reply, err = ri.Do("GET", "test")
+	fmt.Println(reply, err)
+}
+
+func main() {
+	TestPool()
+	//TestCluster()
+}

+ 239 - 0
common/src/github.com/jettyu/goredis/pool.go

@@ -0,0 +1,239 @@
+package goredis
+
+import (
+	"fmt"
+	"log"
+	"sync/atomic"
+	"time"
+
+	"github.com/garyburd/redigo/redis"
+)
+
var (
	// emptyRedisConn is the sentinel Get returns when waitTime elapses
	// without a free connection becoming available.
	emptyRedisConn = &RedisConn{
		err: fmt.Errorf("[redis]Error 0003 : RedisPool Get timeout"),
	}
	// closedRedisConn is the sentinel returned once the pool is closed.
	closedRedisConn = &RedisConn{
		err: fmt.Errorf("[redis]Error 0002 : this pool has been closed"),
	}
)
+
// Pool is a redis connection pool: idle connections wait in the
// buffered channel elems; counters are kept with sync/atomic because
// Get/Put race with the background timerEvent goroutine.
type Pool struct {
	callback    func() (redis.Conn, error) // dialer for new connections
	elems       chan *RedisConn            // idle connections ready for reuse
	maxIdle     int32                      // idle conns above this get trimmed by the timer
	maxActive   int32                      // hard cap on outstanding + idle conns
	curActive   int32                      // currently created connections
	elemsSize   int32                      // tracked length of elems, for atomic reads
	status      int32                      //1-closed
	timerStatus int32                      // consecutive seconds the idle queue exceeded maxIdle
	waitTime    int                        //time for wait
	lifeTime    int32                      //time for life,if timeout,will be close the redundant conn
	pingTime    int32                      //time for ping
}
+
+func NewPool(callback func() (redis.Conn, error), maxIdle, maxActive int32) *Pool {
+	pool := &Pool{
+		callback:  callback,
+		elems:     make(chan *RedisConn, maxActive),
+		maxIdle:   maxIdle,
+		maxActive: maxActive,
+		waitTime:  0,
+		lifeTime:  10,
+		pingTime:  20,
+	}
+	go pool.timerEvent()
+
+	return pool
+}
+
+func (this *Pool) SetWaitTime(d int) {
+	this.waitTime = d
+}
+
+func (this *Pool) SetLifeTime(d int) {
+	atomic.StoreInt32(&this.lifeTime, int32(d))
+}
+
+func (this *Pool) SetPingTime(d int) {
+	this.pingTime = int32(d)
+}
+
+func (this *Pool) Update(maxIdle, maxActive int32) {
+
+	if maxIdle == this.maxIdle && maxActive == this.maxActive {
+		return
+	}
+	this.maxIdle = maxIdle
+	elems := this.elems
+	this.elems = make(chan *RedisConn, maxActive)
+	atomic.StoreInt32(&this.elemsSize, 0)
+	atomic.StoreInt32(&this.curActive, 0)
+	flag := true
+	for flag {
+		select {
+		case e := <-elems:
+			select {
+			case this.elems <- e:
+				atomic.AddInt32(&this.elemsSize, 1)
+				atomic.StoreInt32(&this.curActive, 1)
+			default:
+				flag = false
+			}
+		default:
+			flag = false
+		}
+	}
+	atomic.StoreInt32(&this.maxActive, maxActive)
+}
+
+func (this *Pool) TestConn() error {
+	conn := this.Get()
+	defer conn.Close()
+	return conn.Err()
+}
+
+func (this *Pool) Do(commandName string, args ...interface{}) (reply interface{}, err error) {
+	c := this.Get()
+	defer c.Close()
+	return c.Do(commandName, args...)
+}
+
+func (this *Pool) Put(elem *RedisConn) {
+	if atomic.LoadInt32(&this.status) != 0 {
+		elem.pool = nil
+		elem.Close()
+		return
+	}
+
+	if elem.Err() != nil {
+		atomic.AddInt32(&this.curActive, -1)
+		elem.pool = nil
+		elem.Close()
+		return
+	}
+
+	select {
+	case this.elems <- elem:
+		atomic.AddInt32(&this.elemsSize, 1)
+		break
+	default:
+		elem.pool = nil
+		elem.Close()
+		atomic.AddInt32(&this.curActive, -1)
+	}
+}
+
+func (this *Pool) Get() *RedisConn {
+	var (
+		elem *RedisConn
+	)
+	for {
+		elem = this.get()
+		if elem.conn != nil && elem.conn.Err() != nil {
+			atomic.AddInt32(&this.curActive, -1)
+			elem.conn.Close()
+			continue
+		}
+		break
+	}
+
+	return elem
+}
+
+func (this *Pool) get() *RedisConn {
+	if atomic.LoadInt32(&this.status) != 0 {
+		return closedRedisConn
+	}
+	var (
+		conn *RedisConn
+	)
+	select {
+	case e := <-this.elems:
+		conn = e
+		atomic.AddInt32(&this.elemsSize, -1)
+	default:
+		ca := atomic.LoadInt32(&this.curActive)
+		if ca < this.maxActive {
+			c, err := this.callback()
+			if err != nil {
+				conn = &RedisConn{err: err}
+				break
+			}
+
+			conn = &RedisConn{
+				conn: c,
+				pool: this,
+			}
+			atomic.AddInt32(&this.curActive, 1)
+		} else {
+			log.Println("[Warn] 0001 : too many active conn, maxActive=", this.maxActive)
+			if this.waitTime != 0 {
+				select {
+				case conn = <-this.elems:
+					atomic.AddInt32(&this.elemsSize, -1)
+				case <-time.After(time.Second * time.Duration(this.waitTime)):
+					conn = emptyRedisConn
+				}
+			} else {
+				conn = <-this.elems
+			}
+		}
+
+	}
+
+	return conn
+}
+
+func (this *Pool) Close() {
+	atomic.StoreInt32(&this.status, 1)
+	for {
+		select {
+		case e := <-this.elems:
+			e.pool = nil
+			e.Close()
+		default:
+			return
+		}
+	}
+}
+
+func (this *Pool) timerEvent() {
+	timer := time.NewTicker(time.Second * 1)
+	defer timer.Stop()
+	for atomic.LoadInt32(&this.status) == 0 {
+
+		select {
+		case <-timer.C:
+			if atomic.LoadInt32(&this.elemsSize) > this.maxIdle {
+				this.timerStatus++
+				if this.timerStatus > atomic.LoadInt32(&this.lifeTime) {
+					select {
+					case e := <-this.elems:
+						atomic.AddInt32(&this.curActive, -1)
+						atomic.AddInt32(&this.elemsSize, -1)
+						e.pool = nil
+						e.Close()
+					default:
+						this.timerStatus = 0
+					}
+				} else {
+					this.timerStatus = 0
+				}
+			}
+			flag := true
+			n := int(this.elemsSize/this.pingTime + 1)
+			for i := 0; (i < n) && flag; i++ {
+				select {
+				case e := <-this.elems:
+					atomic.AddInt32(&this.elemsSize, -1)
+					e.Do("PING")
+					e.Close()
+				default:
+					flag = false
+					break
+				}
+			}
+		}
+	}
+}

+ 223 - 0
common/src/github.com/jettyu/goredis/pool_test.go

@@ -0,0 +1,223 @@
+package goredis
+
+import (
+	"reflect"
+	"testing"
+	"time"
+
+	"github.com/garyburd/redigo/redis"
+)
+
var (
	// _testPool is shared by all tests below; some tests resize or
	// replace it. Requires a live redis at 127.0.0.1:6379.
	// (Go resolves package-level initialization order by dependency, so
	// maxIdle/maxActive declared below are initialized first.)
	_testPool *Pool = NewPool(func() (redis.Conn, error) {
		c, err := redis.Dial("tcp", "127.0.0.1:6379")
		return c, err
	},
		maxIdle,
		maxActive)

	maxIdle   = int32(16)
	maxActive = int32(1024)
)
+
+func TestNewPool(t *testing.T) {
+	if err := _testPool.TestConn(); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestPoolGet(t *testing.T) {
+	ch := make([]*RedisConn, 1024)
+	var err error
+	for i := 0; i < 1024; i++ {
+		ch[i] = _testPool.Get()
+		if ch[i].Err() != nil {
+			t.Fatal(err)
+		}
+	}
+	for i := 0; i < 1024; i++ {
+		_testPool.Put(ch[i])
+	}
+}
+
+func TestPoolDo(t *testing.T) {
+	reply, err := _testPool.Do("SET", "test", "test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, ok := reply.(string); !ok {
+		t.Fatal("wrong reply type|type=", reflect.TypeOf(reply))
+	} else if reply.(string) != "OK" {
+		t.Fatal("reply wrong|reply=", reply)
+	}
+	reply, err = _testPool.Do("GET", "test")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, ok := reply.([]uint8); !ok {
+		t.Fatal("wrong reply type|type=", reflect.TypeOf(reply))
+	} else if string(reply.([]uint8)) != "test" {
+		t.Fatal("reply wrong|reply=", reply)
+	}
+	_testPool.Do("DEL", "test")
+}
+
+func TestPoolTimerEvent(t *testing.T) {
+	_testPool.Close()
+	maxIdle = int32(3)
+	maxActive = int32(8)
+	_testPool = NewPool(func() (redis.Conn, error) {
+		c, err := redis.Dial("tcp", "192.168.1.202:6379")
+		if err != nil {
+			t.Fatal(err)
+		}
+		return c, err
+	},
+		maxIdle,
+		maxActive)
+	_testPool.SetLifeTime(0)
+	elems := make([]*RedisConn, maxActive)
+	for i := 0; i < int(maxActive); i++ {
+		elems[i] = _testPool.Get()
+		if elems[i].Err() != nil {
+			t.Fatal(elems[i].Err())
+		}
+	}
+	if _testPool.curActive != maxActive {
+		t.Fatal("size wrong|curActive=", _testPool.curActive, "|maxActive=", maxActive)
+	}
+	for i := 0; i < int(maxActive); i++ {
+		elems[i].Close()
+	}
+	if _testPool.elemsSize != maxActive {
+		t.Fatal("size wrong|elemsSize=", _testPool.elemsSize, "|maxActive=", maxActive)
+	}
+	time.AfterFunc(time.Second*3, func() {
+		if _testPool.elemsSize != maxActive-3 || _testPool.curActive != maxActive-3 {
+			t.Fatal("elemsSize=", _testPool.elemsSize, "|curActive=", _testPool.curActive)
+		}
+	})
+	time.Sleep(time.Second * 4)
+}
+
// TestPoolWait shrinks the pool to one connection with a 1s Get
// timeout and exercises the wait path: a connection released just
// before the deadline is obtained; one released just after causes the
// final Get to time out with an error sentinel.
func TestPoolWait(t *testing.T) {
	_testPool.Update(1, 1)
	_testPool.SetWaitTime(1)
	{
		conn := _testPool.Get()
		if conn.Err() != nil {
			t.Error(conn.Err())
		}
		// Release shortly BEFORE the 1s wait deadline.
		time.AfterFunc(time.Millisecond*900, func() { conn.Close() })
	}
	{
		// Should pick up the connection released above within the window.
		conn := _testPool.Get()
		if conn.Err() != nil {
			t.Error(conn.Err())
		}
		conn.Close()
	}
	{
		conn := _testPool.Get()
		if conn.Err() != nil {
			t.Error(conn.Err())
		}
		// Release shortly AFTER the deadline so the next Get times out.
		time.AfterFunc(time.Millisecond*1100, func() { conn.Close() })
	}
	{
		conn := _testPool.Get()
		defer conn.Close()
		// Expected to be the timeout sentinel (emptyRedisConn).
		if conn.Err() == nil {
			t.Error("failed")
		}
	}
}
+
// TestPoolSend pipelines SET/GET/DEL on one connection, flushes, then
// checks the three replies arrive in order with the expected types.
func TestPoolSend(t *testing.T) {
	conn := _testPool.Get()
	defer conn.Close()
	{
		if err := conn.Send("SET", "SEND", "test"); err != nil {
			t.Error(err)
		}
		if err := conn.Send("GET", "SEND"); err != nil {
			t.Error(err)
		}
		if err := conn.Send("DEL", "SEND"); err != nil {
			t.Error(err)
		}
	}
	{
		// Push the three buffered commands to the server.
		if err := conn.Flush(); err != nil {
			t.Error(err)
		}
	}
	{
		// SET reply: status string "OK".
		rp, err := conn.Receive()
		if err != nil {
			t.Error(err)
		}
		if rp.(string) != "OK" {
			t.Error(rp)
		}
	}
	{
		// GET reply: bulk string as []byte.
		rp, err := conn.Receive()
		if err != nil {
			t.Error(err)
		}
		if string(rp.([]byte)) != "test" {
			t.Error(rp)
		}
	}
	{
		// DEL reply: number of keys removed.
		rp, err := conn.Receive()
		if err != nil {
			t.Error(err)
		}
		if rp.(int64) != 1 {
			t.Error(rp)
		}
	}
}
+
+func BenchmarkPoolDo(b *testing.B) {
+	_testPool.Update(100, 10000)
+	key := "testbenchmark"
+	if _, err := _testPool.Do("SET", key, "1"); err != nil {
+		b.Fatal(err)
+	}
+	failedNum := 0
+	for i := 0; i < b.N; i++ {
+		if _, err := _testPool.Do("GET", key); err != nil {
+			failedNum++
+		}
+	}
+	if _, err := _testPool.Do("DEL", key); err != nil {
+		b.Fatal(err)
+	}
+	if failedNum != 0 {
+		b.Error("failedNum=", failedNum)
+	}
+}
+
+func BenchmarkConnDo(b *testing.B) {
+	conn := _testPool.Get()
+	defer conn.Close()
+	key := "testbenchmark"
+	if _, err := conn.Do("SET", key, "1"); err != nil {
+		b.Fatal(err)
+	}
+	failedNum := 0
+	for i := 0; i < b.N; i++ {
+		if _, err := conn.Do("GET", key); err != nil {
+			failedNum++
+		}
+	}
+	if _, err := conn.Do("DEL", key); err != nil {
+		b.Fatal(err)
+	}
+	if failedNum != 0 {
+		b.Error("failedNum=", failedNum)
+	}
+}

+ 494 - 0
common/src/github.com/jettyu/goredis/rediscluster.go

@@ -0,0 +1,494 @@
+package goredis
+
+import "strings"
+import "strconv"
+import "errors"
+import "math/rand"
+import "os"
+import "fmt"
+
// RedisClusterHashSlots is the fixed slot count of a Redis Cluster.
const RedisClusterHashSlots = 16384

// RedisClusterRequestTTL bounds how many redirections/retries one
// command may consume in SendClusterCommand before giving up.
const RedisClusterRequestTTL = 16

// RedisClusterDefaultTimeout is in seconds.
const RedisClusterDefaultTimeout = 1
+
// RedisCluster routes commands to the correct node of a Redis Cluster,
// keeping one connection pool (RedisHandle) per node address.
// NOTE(review): the maps are replaced wholesale rather than mutated in
// place, but reads are unsynchronized — confirm the intended
// concurrency guarantees before sharing one instance across goroutines.
type RedisCluster struct {
	SeedHosts        map[string]bool         // addresses used to (re)discover the cluster
	Handles          map[string]*RedisHandle // addr -> per-node pool
	Slots            map[uint16]string       // hash slot -> owning node addr
	RefreshTableASAP bool                    // set after MOVED; forces a slot-table rebuild
	SingleRedisMode  bool                    // true when talking to one non-cluster redis
	MaxIdle          int
	MaxActive        int
	Debug            bool
}
+
+func NewRedisCluster(addrs []string, max_idle, max_active int, debug bool) RedisCluster {
+	cluster := RedisCluster{RefreshTableASAP: false,
+		SingleRedisMode: false,
+		SeedHosts:       make(map[string]bool),
+		Handles:         make(map[string]*RedisHandle),
+		Slots:           make(map[uint16]string),
+		MaxActive:       max_active,
+		Debug:           debug}
+
+	if cluster.Debug {
+		fmt.Println("[RedisCluster], PID", os.Getpid(), "StartingNewRedisCluster")
+	}
+
+	for _, label := range addrs {
+		cluster.SeedHosts[label] = true
+		cluster.Handles[label] = NewRedisHandle(label, max_idle, max_active, debug)
+	}
+
+	for addr, _ := range cluster.SeedHosts {
+		node, ok := cluster.Handles[addr]
+		if !ok {
+			node = NewRedisHandle(addr, cluster.MaxIdle, cluster.MaxActive, cluster.Debug)
+			cluster.Handles[addr] = node
+		}
+
+		cluster_enabled := cluster.hasClusterEnabled(node)
+		if cluster_enabled == false {
+			if len(cluster.SeedHosts) == 1 {
+				cluster.SingleRedisMode = true
+			} else {
+				panic(errors.New("Multiple Seed Hosts Given, But Cluster Support Disabled in Redis"))
+			}
+		}
+	}
+
+	if cluster.SingleRedisMode == false {
+		cluster.populateSlotsCache()
+	}
+	return cluster
+}
+
+func (self *RedisCluster) Update(max_idle, max_active int32) {
+	for _, rh := range self.Handles {
+		rh.Pool.Update(max_idle, max_active)
+	}
+}
+
+func (self *RedisCluster) SetWaitTime(t int) {
+	for _, rh := range self.Handles {
+		rh.Pool.SetWaitTime(t)
+	}
+}
+
+func (self *RedisCluster) SetLifeTime(t int) {
+	for _, rh := range self.Handles {
+		rh.Pool.SetLifeTime(t)
+	}
+}
+
+func (self *RedisCluster) SetPingTime(t int) {
+	for _, rh := range self.Handles {
+		rh.Pool.SetPingTime(t)
+	}
+}
+
+func (self *RedisCluster) TestCluster() error {
+	for _, rh := range self.Handles {
+		_, err := rh.Do("CLUSTER", "INFO")
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (self *RedisCluster) GetHandle(key string) *RedisHandle {
+	return self.HandleForKey(key)
+}
+
+func (self *RedisCluster) Do(cmd string, args ...interface{}) (reply interface{}, err error) {
+	return self.SendClusterCommand(cmd, args...)
+}
+
// hasClusterEnabled probes node with CLUSTER INFO and reports whether
// the server speaks the cluster protocol.
// NOTE(review): any error other than the two recognized "disabled"
// messages — including plain network failures — is treated as
// cluster-enabled; confirm that is intended before relying on it.
func (self *RedisCluster) hasClusterEnabled(node *RedisHandle) bool {
	_, err := node.Do("CLUSTER", "INFO")
	if err != nil {
		if err.Error() == "ERR This instance has cluster support disabled" ||
			err.Error() == "ERR unknown command 'CLUSTER'" {
			return false
		}
	}
	return true
}
+
+// contact the startup nodes and try to fetch the hash slots -> instances
+// map in order to initialize the Slots map.
+func (self *RedisCluster) populateSlotsCache() {
+	if self.SingleRedisMode == true {
+		return
+	}
+	if self.Debug {
+		fmt.Println("[RedisCluster], PID", os.Getpid(), "[PopulateSlots Running]")
+	}
+	seedHosts := make(map[string]bool)
+	handles := make(map[string]*RedisHandle)
+	slotsMap := make(map[uint16]string)
+	for k, v := range self.SeedHosts {
+		seedHosts[k] = v
+	}
+	for k, v := range self.Handles {
+		handles[k] = v
+	}
+	for k, v := range self.Slots {
+		slotsMap[k] = v
+	}
+	for name, _ := range self.SeedHosts {
+		if self.Debug {
+			fmt.Println("[RedisCluster] [PopulateSlots] Checking: ", name)
+		}
+		var (
+			node *RedisHandle
+			ok   bool
+		)
+		if node, ok = handles[name]; !ok {
+			node = NewRedisHandle(name, self.MaxIdle, self.MaxActive, self.Debug)
+			handles[name] = node
+		}
+		cluster_info, err := node.Do("CLUSTER", "NODES")
+		if err == nil {
+			lines := strings.Split(string(cluster_info.([]uint8)), "\n")
+			for _, line := range lines {
+				if line != "" {
+					fields := strings.Split(line, " ")
+					addr := fields[1]
+					if addr == ":0" {
+						addr = name
+					}
+					// add to seedlist if not in cluster
+					seedHosts[addr] = true
+
+					// add to handles if not in handles
+					if _, ok := handles[name]; !ok {
+						handles[name] = NewRedisHandle(addr, self.MaxIdle, self.MaxActive, self.Debug)
+					}
+
+					slots := fields[8:len(fields)]
+					for _, s_range := range slots {
+						slot_range := s_range
+						if slot_range != "[" {
+							if self.Debug {
+								fmt.Println("[RedisCluster] Considering Slot Range", slot_range)
+							}
+							r_pieces := strings.Split(slot_range, "-")
+							min, _ := strconv.Atoi(r_pieces[0])
+							max, _ := strconv.Atoi(r_pieces[1])
+							for i := min; i <= max; i++ {
+								slotsMap[uint16(i)] = addr
+							}
+						}
+					}
+				}
+			}
+			if self.Debug {
+				fmt.Println("[RedisCluster] [Initializing] DONE, ",
+					"Slots: ", len(slotsMap),
+					"Handles So Far:", len(handles),
+					"SeedList:", len(seedHosts))
+			}
+			break
+		}
+	}
+	self.SeedHosts = seedHosts
+	self.Handles = handles
+	self.Slots = slotsMap
+	self.switchToSingleModeIfNeeded()
+}
+
+func (self *RedisCluster) switchToSingleModeIfNeeded() {
+	// catch case where we really intend to be on
+	// single redis mode, but redis was not
+	// started on time
+	if len(self.SeedHosts) == 1 &&
+		len(self.Slots) == 0 &&
+		len(self.Handles) == 1 {
+		for _, node := range self.Handles {
+			cluster_enabled := self.hasClusterEnabled(node)
+			if cluster_enabled == false {
+				self.SingleRedisMode = true
+			}
+		}
+	}
+}
+
+func (self *RedisCluster) KeyForRequest(cmd string, args ...interface{}) string {
+	cmd = strings.ToLower(cmd)
+	if cmd == "info" ||
+		cmd == "multi" ||
+		cmd == "exec" ||
+		cmd == "slaveof" ||
+		cmd == "config" ||
+		cmd == "shutdown" {
+		return ""
+	}
+	if args[0] == nil {
+		return ""
+	}
+	strs := args[0].([]interface{})
+	if strs != nil && strs[0] != nil {
+		return strs[0].(string)
+	}
+	return ""
+}
+
+// Return the hash slot from the key.
+func (self *RedisCluster) SlotForKey(key string) uint16 {
+	checksum := ChecksumCRC16([]byte(key))
+	slot := checksum % RedisClusterHashSlots
+	return slot
+}
+
+func (self *RedisCluster) RandomRedisHandle() *RedisHandle {
+	if len(self.Handles) == 0 {
+		return nil
+	}
+	addrs := make([]string, len(self.Handles))
+	i := 0
+	for addr, _ := range self.Handles {
+		addrs[i] = addr
+		i++
+	}
+	rand_addrs := make([]string, i)
+	perm := rand.Perm(i)
+	for j, v := range perm {
+		rand_addrs[v] = addrs[j]
+	}
+	handle := self.Handles[rand_addrs[0]]
+	self.switchToSingleModeIfNeeded()
+	return handle
+}
+
+// Given a slot return the link (Redis instance) to the mapped node.
+// Make sure to create a connection with the node if we don't have
+// one.
+func (self *RedisCluster) RedisHandleForSlot(slot uint16) *RedisHandle {
+
+	node, exists := self.Slots[slot]
+	// If we don't know what the mapping is, return a random node.
+	if !exists {
+		if self.Debug {
+			fmt.Println("[RedisCluster] No One Appears Responsible For Slot: ", slot, "our slotsize is: ", len(self.Slots))
+		}
+		return self.RandomRedisHandle()
+	}
+
+	r, cx_exists := self.Handles[node]
+	// add to cluster if not in cluster
+	if cx_exists {
+		return r
+	} else {
+		r = NewRedisHandle(node, self.MaxIdle, self.MaxActive, self.Debug)
+		handles := make(map[string]*RedisHandle)
+		for k, v := range self.Handles {
+			handles[k] = v
+		}
+		handles[node] = r
+		self.Handles = handles
+	}
+	// XXX consider returning random if failure
+	return r
+}
+
+func (self *RedisCluster) disconnectAll() {
+	if self.Debug {
+		fmt.Println("[RedisCluster] PID:", os.Getpid(), " [Disconnect!] Had Handles:", len(self.Handles))
+	}
+	// disconnect anyone in handles
+	for _, handle := range self.Handles {
+		handle.Pool.Close()
+	}
+	self.Handles = make(map[string]*RedisHandle)
+
+	// nuke slots
+	self.Slots = make(map[uint16]string)
+}
+
+func (self *RedisCluster) handleSingleMode(flush bool, cmd string, args ...interface{}) (reply interface{}, err error) {
+	for _, handle := range self.Handles {
+		return handle.Do(cmd, args...)
+	}
+	return nil, errors.New("no redis handle found for single mode")
+}
+
+func (self *RedisCluster) SendClusterCommand(cmd string, args ...interface{}) (reply interface{}, err error) {
+	var flush bool = true
+	// forward onto first redis in the handle
+	// if we are set to single mode
+	if self.SingleRedisMode == true {
+		return self.handleSingleMode(flush, cmd, args...)
+	}
+
+	if self.RefreshTableASAP == true {
+		if self.Debug {
+			fmt.Println("[RedisCluster] Refresh Needed")
+		}
+		self.disconnectAll()
+		self.populateSlotsCache()
+		self.RefreshTableASAP = false
+		// in case we realized we were now in Single Mode
+		if self.SingleRedisMode == true {
+			return self.handleSingleMode(flush, cmd, args...)
+		}
+	}
+
+	ttl := RedisClusterRequestTTL
+	key := self.KeyForRequest(cmd, args)
+	try_random_node := false
+	asking := false
+	for {
+		if ttl <= 0 {
+			break
+		}
+		ttl -= 1
+		if key == "" {
+			panic(errors.New("no way to dispatch this type of command to redis cluster"))
+		}
+		slot := self.SlotForKey(key)
+
+		var redis *RedisHandle
+
+		if self.Debug {
+			fmt.Println("[RedisCluster] slot: ", slot, "key", key, "ttl", ttl)
+		}
+
+		if try_random_node {
+			if self.Debug {
+				fmt.Println("[RedisCluster] Trying Random Node")
+			}
+			redis = self.RandomRedisHandle()
+			try_random_node = false
+		} else {
+			if self.Debug {
+				fmt.Println("[RedisCluster] Trying Specific Node")
+			}
+			redis = self.RedisHandleForSlot(slot)
+		}
+
+		if redis == nil {
+			if self.Debug {
+				fmt.Println("[RedisCluster] could not get redis handle, bailing this round")
+			}
+			break
+		}
+		if self.Debug {
+			fmt.Println("[RedisCluster] Got addr: ", redis.Addr)
+		}
+
+		if asking {
+			if self.Debug {
+				fmt.Println("ASKING")
+			}
+			//	conn := redis.GetRedisConn()
+			redis.Do("ASKING")
+			//	conn.Close()
+			asking = false
+		}
+
+		var err error
+		var resp interface{}
+
+		if flush {
+			resp, err = redis.Do(cmd, args...)
+			if err == nil {
+				if self.Debug {
+					fmt.Println("[RedisCluster] Success")
+				}
+				return resp, nil
+			}
+		} /*else {
+			err = redis.Send(cmd, args...)
+			if err == nil {
+				if self.Debug {
+					fmt.Println("[RedisCluster] Success")
+				}
+				return nil, nil
+			}
+		}*/
+
+		// ok we are here so err is not nil
+		errv := strings.Split(err.Error(), " ")
+		if errv[0] == "MOVED" || errv[0] == "ASK" {
+			if errv[0] == "ASK" {
+				if self.Debug {
+					fmt.Println("[RedisCluster] ASK")
+				}
+				asking = true
+			} else {
+				// Serve replied with MOVED. It's better for us to
+				// ask for CLUSTER NODES the next time.
+				SetRefreshNeeded()
+				newslot, _ := strconv.Atoi(errv[1])
+				newaddr := errv[2]
+				slotsMap := make(map[uint16]string)
+				for k, v := range self.Slots {
+					slotsMap[k] = v
+				}
+				slotsMap[uint16(newslot)] = newaddr
+				self.Slots = slotsMap
+				if self.Debug {
+					fmt.Println("[RedisCluster] MOVED newaddr: ", newaddr, "new slot: ", newslot, "my slots len: ", len(self.Slots))
+				}
+			}
+		} else {
+			if self.Debug {
+				fmt.Println("[RedisCluster] Other Error: ", err.Error())
+			}
+			try_random_node = true
+		}
+	}
+	if self.Debug {
+		fmt.Println("[RedisCluster] Failed Command")
+	}
+	return nil, errors.New("could not complete command")
+}
+
+//func (self *RedisCluster) Send(cmd string, args ...interface{}) (err error) {
+//	_, err = self.SendClusterCommand(false, cmd, args...)
+//	return err
+//}
+
+func (self *RedisCluster) SetRefreshNeeded() {
+	self.RefreshTableASAP = true
+}
+
+func (self *RedisCluster) HandleForKey(key string) *RedisHandle {
+	// forward onto first redis in the handle
+	// if we are set to single mode
+	if self.SingleRedisMode == true {
+		for _, handle := range self.Handles {
+			return handle
+		}
+	}
+	slot := self.SlotForKey(key)
+	handle := self.RedisHandleForSlot(slot)
+	return handle
+}
+
// RedisClusterAccess is the minimal client surface exposed by the
// package-level Instance wrappers below.
type RedisClusterAccess interface {
	Do(commandName string, args ...interface{}) (reply interface{}, err error)
	//	Send(cmd string, args ...interface{}) (err error)
	SetRefreshNeeded()
	HandleForKey(key string) *RedisHandle
}
+
// Instance is the process-wide default cluster used by the package-
// level helpers below; assign it before calling them.
var Instance RedisCluster

// Do runs commandName on the default Instance.
func Do(commandName string, args ...interface{}) (reply interface{}, err error) {
	return Instance.Do(commandName, args...)
}

//func Send(cmd string, args ...interface{}) (err error) {
//	return Instance.Send(cmd, args...)
//}

// SetRefreshNeeded flags the default Instance for a slot-table refresh.
func SetRefreshNeeded() {
	Instance.SetRefreshNeeded()
}

// HandleForKey returns the default Instance's node handle for key.
func HandleForKey(key string) *RedisHandle {
	return Instance.HandleForKey(key)
}

+ 80 - 0
common/src/github.com/jettyu/goredis/rediscluster_test.go

@@ -0,0 +1,80 @@
+package goredis
+
+import (
+	"testing"
+)
+
var (
	// _testCluster requires a live 6-node cluster on localhost ports
	// 7000-7005 (the standard create-cluster test layout).
	_testCluster RedisCluster = NewRedisCluster(
		[]string{
			"127.0.0.1:7000",
			"127.0.0.1:7001",
			"127.0.0.1:7002",
			"127.0.0.1:7003",
			"127.0.0.1:7004",
			"127.0.0.1:7005",
		},
		8,
		8,
		false,
	)
)
+
+func TestRedisCluster(t *testing.T) {
+	if err := _testCluster.TestCluster(); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestClusterDo(t *testing.T) {
+	{
+		rp, err := _testCluster.Do("SET", "CLUSTER", "test")
+		if err != nil {
+			t.Fatal(err)
+		}
+		if rp.(string) != "OK" {
+			t.Fatal(rp)
+		}
+	}
+	_testCluster.Do("DEL", "CLUSTER")
+}
+
// TestClusterGetHandle resolves the node handle for a key, pipelines
// SET/GET/DEL on one of its connections, and checks the three replies
// arrive in order. All three commands use the same key, so they are
// guaranteed to hash to the same node.
func TestClusterGetHandle(t *testing.T) {
	rh := _testCluster.GetHandle("CLUSTER")
	if rh == nil {
		t.Fatal("rh is nil")
	}
	conn := rh.Get()
	defer conn.Close()
	if err := conn.Send("SET", "CLUSTER", "test"); err != nil {
		t.Fatal(err)
	}
	if err := conn.Send("GET", "CLUSTER"); err != nil {
		t.Fatal(err)
	}
	if err := conn.Send("DEL", "CLUSTER"); err != nil {
		t.Fatal(err)
	}
	// Push the pipelined commands to the server.
	if err := conn.Flush(); err != nil {
		t.Fatal(err)
	}
	{
		replys := make([]interface{}, 3)
		errs := make([]error, 3)
		for i := 0; i < 3; i++ {
			replys[i], errs[i] = conn.Receive()
			if errs[i] != nil {
				t.Fatal(errs[i])
			}
		}
		// SET -> status "OK", GET -> bulk []byte, DEL -> removed count.
		if replys[0].(string) != "OK" {
			t.Fatal(replys[0])
		}
		if string(replys[1].([]byte)) != "test" {
			t.Fatal(replys[1])
		}
		if replys[2].(int64) != 1 {
			t.Fatal(replys[2])
		}
	}
}

+ 38 - 0
common/src/github.com/jettyu/goredis/redishandle.go

@@ -0,0 +1,38 @@
+package goredis
+
+import "github.com/garyburd/redigo/redis"
+import "os"
+import "fmt"
+
// RedisHandle pairs one node's address with its connection pool; the
// embedded *Pool exposes Get/Put/Do directly on the handle.
type RedisHandle struct {
	Addr string
	*Pool
}
+
+// XXX: add some password protection
+func NewRedisHandle(addr string, max_idle, max_active int, debug bool) *RedisHandle {
+	if debug {
+		fmt.Println("[RedisHandle] Opening New Handle For Pid:", os.Getpid())
+	}
+	rh := &RedisHandle{
+		Addr: addr,
+		Pool: NewPool(func() (redis.Conn, error) {
+			c, err := redis.Dial("tcp", addr)
+			if err != nil {
+				return nil, err
+			}
+			return c, nil
+		},
+			int32(max_idle),
+			int32(max_active)),
+	}
+
+	return rh
+}
+
+// XXX: is _not_ calling defer rc.Close()
+//      so do it yourself later
+//func (self *RedisHandle) Send(cmd string, args ...interface{}) (err error) {
+//	rc := self.GetRedisConn()
+//	return rc.Send(cmd, args...)
+//}

+ 51 - 0
common/src/github.com/jettyu/goredis/reply.go

@@ -0,0 +1,51 @@
+package goredis
+
// Reply type tags, mirroring hiredis' REDIS_REPLY_* constants.
const (
	REDIS_REPLY_STRING  = 1 // bulk string
	REDIS_REPLY_ARRAY   = 2 // multi-bulk array
	REDIS_REPLY_INTEGER = 3
	REDIS_REPLY_NIL     = 4
	REDIS_REPLY_STATUS  = 5 // simple status line such as +OK
	REDIS_REPLY_ERROR   = 6
)

// RedisReply is a hiredis-style decoded reply tree.
type RedisReply struct {
	Type     int           /* REDIS_REPLY_* */
	Integer  int64         /* The integer when type is REDIS_REPLY_INTEGER */
	Len      int           /* Length of string */
	Str      string        /* Used for REDIS_REPLY_ERROR, REDIS_REPLY_STATUS and REDIS_REPLY_STRING */
	Elements int           /* number of elements, for REDIS_REPLY_ARRAY */
	Element  []*RedisReply /* elements vector for REDIS_REPLY_ARRAY */
}

// NewRedisReply converts a redigo (reply, err) pair into a RedisReply.
// redigo represents bulk strings as []uint8, status replies as string,
// integers as int64, arrays as []interface{}, and nil replies as nil.
func NewRedisReply(re interface{}, err error) *RedisReply {
	reply := &RedisReply{}
	if err != nil {
		reply.Type = REDIS_REPLY_ERROR
		reply.Str = err.Error()
		reply.Len = len(reply.Str)
		return reply
	}
	if re == nil {
		reply.Type = REDIS_REPLY_NIL
		return reply
	}
	switch v := re.(type) {
	case []uint8:
		reply.Type = REDIS_REPLY_STRING
		reply.Str = string(v)
		reply.Len = len(reply.Str)
	case string:
		// BUG FIX: status replies ("OK" from SET, "PONG", ...) arrive
		// as Go strings; the original switch had no case for them, so
		// they decoded with Type 0 even though REDIS_REPLY_STATUS was
		// declared for exactly this purpose.
		reply.Type = REDIS_REPLY_STATUS
		reply.Str = v
		reply.Len = len(v)
	case []interface{}:
		reply.Type = REDIS_REPLY_ARRAY
		reply.Elements = len(v)
		elems := make([]*RedisReply, reply.Elements)
		for i, r := range v {
			elems[i] = NewRedisReply(r, nil)
		}
		reply.Element = elems
	case int64:
		reply.Type = REDIS_REPLY_INTEGER
		reply.Integer = v
	}
	return reply
}