202 lines
4.6 KiB
Go
202 lines
4.6 KiB
Go
package fsm
|
|
|
|
import (
|
|
"bytes"
|
|
"encoding/gob"
|
|
"fmt"
|
|
"fusenapi/initalize"
|
|
"fusenapi/utils/autoconfig"
|
|
"log"
|
|
"net"
|
|
"os"
|
|
"time"
|
|
|
|
"github.com/hashicorp/raft"
|
|
"gorm.io/gorm"
|
|
)
|
|
|
|
func test1() {
|
|
log.SetFlags(log.Llongfile)
|
|
|
|
fsm := StartNode("fs1", "localhost:5500", nil, initalize.InitMysql("fusentest:XErSYmLELKMnf3Dh@tcp(110.41.19.98:3306)/fusentest"))
|
|
|
|
time.Sleep(time.Second * 5)
|
|
|
|
for i := 0; i < 30; i++ {
|
|
go log.Println(fsm.GetUserState(39))
|
|
}
|
|
|
|
log.Println(fsm.GetUserState(39))
|
|
|
|
select {}
|
|
}
|
|
|
|
// StartNode 启动节点
|
|
func StartNode(ServerID string, RaftBind string, serverconfigs []*autoconfig.ConfigServer, gdb *gorm.DB) *StateCluster {
|
|
|
|
fsm := &StateCluster{
|
|
store: make(map[int64]*UserState),
|
|
waiter: NewWaitCallback(),
|
|
gdb: gdb,
|
|
}
|
|
|
|
var retainSnapshotCount = 2
|
|
// var ServerID string = "fs1"
|
|
// var RaftBind string = "localhost:5500"
|
|
var RaftDir string = fmt.Sprintf("/tmp/raftdir/%s", ServerID)
|
|
|
|
// Setup Raft configuration.
|
|
config := raft.DefaultConfig()
|
|
config.LocalID = raft.ServerID(ServerID)
|
|
|
|
// Setup Raft communication.
|
|
addr, err := net.ResolveTCPAddr("tcp", RaftBind)
|
|
if err != nil {
|
|
panic(err)
|
|
}
|
|
transport, err := raft.NewTCPTransport(RaftBind, addr, 3, 30*time.Second, os.Stderr)
|
|
if err != nil {
|
|
panic(err)
|
|
}
|
|
|
|
// Create the snapshot store. This allows the Raft to truncate the log.
|
|
snapshots, err := raft.NewFileSnapshotStore(RaftDir, retainSnapshotCount, os.Stderr)
|
|
if err != nil {
|
|
panic(fmt.Errorf("file snapshot store: %s", err))
|
|
}
|
|
|
|
// Create the log store and stable store.
|
|
logStore := raft.NewInmemStore()
|
|
stableStore := raft.NewInmemStore()
|
|
|
|
// Create the Raft system.
|
|
fsm.ra, err = raft.NewRaft(config, fsm, logStore, stableStore, snapshots, transport)
|
|
if err != nil {
|
|
panic(err)
|
|
}
|
|
|
|
var dup map[string]bool = make(map[string]bool)
|
|
var rserver []raft.Server = []raft.Server{
|
|
{
|
|
Suffrage: raft.Voter,
|
|
ID: config.LocalID,
|
|
Address: transport.LocalAddr(),
|
|
},
|
|
}
|
|
dup[string(config.LocalID)] = true
|
|
dup[string("backend")] = true
|
|
dup[string("product-model")] = true
|
|
dup[string("product-template")] = true
|
|
|
|
for _, cfg := range serverconfigs {
|
|
if _, ok := dup[cfg.Name]; !ok {
|
|
dup[cfg.Name] = true
|
|
rserver = append(rserver, raft.Server{
|
|
Suffrage: raft.Voter,
|
|
ID: raft.ServerID(cfg.Name),
|
|
Address: raft.ServerAddress(fmt.Sprintf("%s:%d", cfg.Host, cfg.Port-2000)),
|
|
})
|
|
}
|
|
}
|
|
|
|
configuration := raft.Configuration{
|
|
Servers: rserver,
|
|
}
|
|
|
|
fu := fsm.ra.BootstrapCluster(configuration)
|
|
if err := fu.Error(); err != nil {
|
|
log.Println(err)
|
|
}
|
|
|
|
waitForCluster(fsm.ra)
|
|
|
|
return fsm
|
|
}
|
|
|
|
// func JoinCluster(ServerID string, LeaderAddress string, RaftBind string, gdb *gorm.DB) *StateCluster {
|
|
|
|
// fsm := StartNode(ServerID, RaftBind, gdb)
|
|
|
|
// configFuture := fsm.ra.GetConfiguration()
|
|
// if err := configFuture.Error(); err != nil {
|
|
// log.Fatalf("failed to get raft configuration: %v", err)
|
|
// }
|
|
|
|
// for _, srv := range configFuture.Configuration().Servers {
|
|
// if srv.ID == raft.ServerID(ServerID) && srv.Address == raft.ServerAddress(LeaderAddress) {
|
|
// if future := fsm.ra.RemoveServer(srv.ID, 0, 0); future.Error() != nil {
|
|
// log.Fatalf("Error removing existing server [%s]: %v", ServerID, future.Error())
|
|
// }
|
|
// }
|
|
// }
|
|
|
|
// f := fsm.ra.AddVoter(raft.ServerID(ServerID), raft.ServerAddress(RaftBind), 0, 0)
|
|
// if f.Error() != nil {
|
|
// log.Fatalf("Error adding voter: %v", f.Error())
|
|
// }
|
|
|
|
// return fsm
|
|
// }
|
|
|
|
func waitForCluster(ra *raft.Raft) {
|
|
ticker := time.NewTicker(500 * time.Millisecond)
|
|
defer ticker.Stop()
|
|
|
|
for range ticker.C {
|
|
state := ra.State()
|
|
if state == raft.Leader || state == raft.Follower {
|
|
log.Println("Raft cluster is running")
|
|
return
|
|
} else {
|
|
log.Println("Still waiting for the cluster to start...")
|
|
}
|
|
}
|
|
}
|
|
|
|
// var gdb *gorm.DB = initalize.InitMysql("fusentest:XErSYmLELKMnf3Dh@tcp(110.41.19.98:3306)/fusentest")
|
|
|
|
// UserState holds a user's cached authentication state; it is the value type
// kept in StateCluster.store and carried in replicated commands.
type UserState struct {
	// Expired is the instant after which this entry is no longer valid.
	Expired time.Time
	// UserId is the user identifier (the key in StateCluster.store).
	UserId int64
	// PwdHash is a hash of the user's password — NOTE(review): exact
	// semantics not visible in this chunk; confirm against callers.
	PwdHash uint64
}
|
|
|
|
func (us *UserState) Encode() []byte {
|
|
var buf = bytes.NewBuffer(nil)
|
|
err := gob.NewEncoder(buf).Encode(us)
|
|
if err != nil {
|
|
log.Panic(err)
|
|
return nil
|
|
}
|
|
|
|
return buf.Bytes()
|
|
}
|
|
|
|
// command is used for internal command representation.
// It is the unit applied through the Raft log.
type command struct {
	// Op is the operation discriminator — valid values not visible in this chunk.
	Op string
	// Key is the user id the command targets (matches StateCluster.store's key type).
	Key int64
	// Value is the user state payload; presumably nil for delete-style ops — confirm.
	Value *UserState
}
|
|
|
|
func (cmd *command) Encode() []byte {
|
|
var buf = bytes.NewBuffer(nil)
|
|
err := gob.NewEncoder(buf).Encode(cmd)
|
|
if err != nil {
|
|
log.Panic(err)
|
|
return nil
|
|
}
|
|
|
|
return buf.Bytes()
|
|
}
|
|
|
|
func (cmd *command) Decode(sbuf []byte) error {
|
|
var buf = bytes.NewBuffer(sbuf)
|
|
err := gob.NewDecoder(buf).Decode(cmd)
|
|
if err != nil {
|
|
// log.Panic(err)
|
|
return err
|
|
}
|
|
return nil
|
|
}
|