// fusenapi/fsm/main.go

package fsm
import (
	"bytes"
	"encoding/gob"
	"flag"
	"fmt"
	"log"
	"net"
	"os"
	"os/signal"
	"path/filepath"
	"runtime"
	"syscall"
	"time"

	"fusenapi/initalize"
	"fusenapi/utils/autoconfig"

	"github.com/hashicorp/raft"
	"github.com/lni/dragonboat"
	"github.com/lni/dragonboat/config"
	"github.com/lni/dragonboat/logger"
	"gorm.io/gorm"
)

func test1() {
	log.SetFlags(log.Llongfile)
	fsm := StartNode1("fs1", "localhost:5500", nil, initalize.InitMysql("fusentest:XErSYmLELKMnf3Dh@tcp(110.41.19.98:3306)/fusentest"))
	time.Sleep(time.Second * 5)
	for i := 0; i < 30; i++ {
		// wrap the call in a closure so GetUserState itself runs on the new
		// goroutine; as a direct argument of `go log.Println(...)` it would
		// be evaluated synchronously on the calling goroutine
		go func() {
			log.Println(fsm.GetUserState(39))
		}()
	}
	log.Println(fsm.GetUserState(39))
	select {}
}

// addresses holds the hard-coded raft addresses of the three initial members.
var addresses = []string{
	"localhost:5500",
	"localhost:5501",
	"localhost:5502",
}

// StartNode starts a dragonboat NodeHost for the given replica and joins it
// to the given shard. gdb is accepted for symmetry with StartNode1 but is not
// used by this function.
func StartNode(replicaID uint64, exampleShardID uint64, addr string, gdb *gorm.DB) *dragonboat.NodeHost {
flag.Parse()
if len(addr) == 0 && replicaID != 1 && replicaID != 2 && replicaID != 3 {
fmt.Fprintf(os.Stderr, "node id must be 1, 2 or 3 when address is not specified\n")
os.Exit(1)
}
	// ignore SIGPIPE on macOS, see https://github.com/golang/go/issues/17393
	if runtime.GOOS == "darwin" {
		signal.Ignore(syscall.SIGPIPE)
	}
initialMembers := make(map[uint64]string)
	// When joining a new node that is not one of the initial members, the
	// initialMembers map should be left empty; the same applies when
	// restarting such a node. It is populated here for simplicity.
for idx, v := range addresses {
// key is the ReplicaID, ReplicaID is not allowed to be 0
// value is the raft address
initialMembers[uint64(idx+1)] = v
}
	// For simplicity the addresses of the three initial raft members are hard
	// coded above; when addr is empty, the node being launched is assumed to
	// be one of those initial members.
	nodeAddr := initialMembers[replicaID]
	if len(addr) != 0 {
		// an explicit addr overrides the hard-coded address, e.g. when
		// launching a node that is not an initial member
		nodeAddr = addr
	}
fmt.Fprintf(os.Stdout, "node address: %s\n", nodeAddr)
// change the log verbosity
logger.GetLogger("raft").SetLevel(logger.ERROR)
logger.GetLogger("rsm").SetLevel(logger.WARNING)
logger.GetLogger("transport").SetLevel(logger.WARNING)
logger.GetLogger("grpc").SetLevel(logger.WARNING)
// config for raft node
// See GoDoc for all available options
rc := config.Config{
// ShardID and ReplicaID of the raft node
		ReplicaID: replicaID,
ShardID: exampleShardID,
ElectionRTT: 10,
HeartbeatRTT: 1,
CheckQuorum: true,
SnapshotEntries: 10,
CompactionOverhead: 5,
}
datadir := filepath.Join(
"example-data",
"queue-data",
fmt.Sprintf("node%d", replicaID))
nhc := config.NodeHostConfig{
WALDir: datadir,
// NodeHostDir is where everything else is stored.
NodeHostDir: datadir,
// RTTMillisecond is the average round trip time between NodeHosts (usually
// on two machines/vms), it is in millisecond. Such RTT includes the
// processing delays caused by NodeHosts, not just the network delay between
// two NodeHost instances.
RTTMillisecond: 200,
// RaftAddress is used to identify the NodeHost instance
RaftAddress: nodeAddr,
}
nh, err := dragonboat.NewNodeHost(nhc)
if err != nil {
panic(err)
}
	if err := nh.StartReplica(initialMembers, false, NewSMQueue, rc); err != nil {
		fmt.Fprintf(os.Stderr, "failed to start replica, %v\n", err)
		os.Exit(1)
	}
return nh
}
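
// startExampleShard is a usage sketch, not part of the original file: it boots
// all three hard-coded replicas in one process and returns their NodeHosts.
// The shard ID 128 is an arbitrary value chosen for illustration.
func startExampleShard(gdb *gorm.DB) []*dragonboat.NodeHost {
	const exampleShardID = 128
	hosts := make([]*dragonboat.NodeHost, 0, len(addresses))
	for i := range addresses {
		// replica IDs are 1-based; an empty addr means this replica is one
		// of the initial members listed in addresses above
		hosts = append(hosts, StartNode(uint64(i+1), exampleShardID, "", gdb))
	}
	return hosts
}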

// StartNode1 starts a hashicorp/raft node, bootstraps the cluster from the
// discovered server configs, and blocks until the cluster reports a running
// state.
func StartNode1(ServerID string, RaftBind string, serverconfigs []*autoconfig.ConfigServer, gdb *gorm.DB) *StateCluster {
fsm := &StateCluster{
store: make(map[int64]*UserState),
gdb: gdb,
}
	retainSnapshotCount := 2
	RaftDir := fmt.Sprintf("/tmp/raftdir/%s", ServerID)
// Setup Raft configuration.
config := raft.DefaultConfig()
config.LocalID = raft.ServerID(ServerID)
// Setup Raft communication.
addr, err := net.ResolveTCPAddr("tcp", RaftBind)
if err != nil {
panic(err)
}
transport, err := raft.NewTCPTransport(RaftBind, addr, 3, 30*time.Second, os.Stderr)
if err != nil {
panic(err)
}
// Create the snapshot store. This allows the Raft to truncate the log.
snapshots, err := raft.NewFileSnapshotStore(RaftDir, retainSnapshotCount, os.Stderr)
if err != nil {
panic(fmt.Errorf("file snapshot store: %s", err))
}
// Create the log store and stable store.
logStore := raft.NewInmemStore()
stableStore := raft.NewInmemStore()
// Create the Raft system.
fsm.ra, err = raft.NewRaft(config, fsm, logStore, stableStore, snapshots, transport)
if err != nil {
panic(err)
}
	// Pre-seed dup with the local node plus the service names that must not
	// join as raft voters, so the discovery loop below skips them.
	dup := make(map[string]bool)
	rserver := []raft.Server{
		{
			Suffrage: raft.Voter,
			ID:       config.LocalID,
			Address:  transport.LocalAddr(),
		},
	}
	dup[string(config.LocalID)] = true
	dup["backend"] = true
	dup["product-model"] = true
	dup["product-template"] = true
	for _, cfg := range serverconfigs {
		if _, ok := dup[cfg.Name]; !ok {
			dup[cfg.Name] = true
			rserver = append(rserver, raft.Server{
				Suffrage: raft.Voter,
				ID:       raft.ServerID(cfg.Name),
				// by convention a peer's raft address uses its service
				// port minus 2000
				Address: raft.ServerAddress(fmt.Sprintf("%s:%d", cfg.Host, cfg.Port-2000)),
			})
		}
	}
configuration := raft.Configuration{
Servers: rserver,
}
	fu := fsm.ra.BootstrapCluster(configuration)
	if err := fu.Error(); err != nil {
		// bootstrapping an already-initialized node fails with
		// raft.ErrCantBootstrap; log it and reuse the existing state
		log.Println(err)
	}
waitForCluster(fsm.ra)
return fsm
}
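
// startExampleStateCluster is a minimal usage sketch; the peer below is
// hypothetical. Names already in the exclusion set above ("backend",
// "product-model", "product-template") would be skipped.
func startExampleStateCluster(gdb *gorm.DB) *StateCluster {
	peers := []*autoconfig.ConfigServer{
		// service port 7501 maps to raft address localhost:5501
		{Name: "fs2", Host: "localhost", Port: 7501},
	}
	return StartNode1("fs1", "localhost:5500", peers, gdb)
}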
// func JoinCluster(ServerID string, LeaderAddress string, RaftBind string, gdb *gorm.DB) *StateCluster {
// fsm := StartNode(ServerID, RaftBind, gdb)
// configFuture := fsm.ra.GetConfiguration()
// if err := configFuture.Error(); err != nil {
// log.Fatalf("failed to get raft configuration: %v", err)
// }
// for _, srv := range configFuture.Configuration().Servers {
// if srv.ID == raft.ServerID(ServerID) && srv.Address == raft.ServerAddress(LeaderAddress) {
// if future := fsm.ra.RemoveServer(srv.ID, 0, 0); future.Error() != nil {
// log.Fatalf("Error removing existing server [%s]: %v", ServerID, future.Error())
// }
// }
// }
// f := fsm.ra.AddVoter(raft.ServerID(ServerID), raft.ServerAddress(RaftBind), 0, 0)
// if f.Error() != nil {
// log.Fatalf("Error adding voter: %v", f.Error())
// }
// return fsm
// }

// waitForCluster polls until this node reports itself as leader or follower,
// i.e. until the raft cluster is up.
func waitForCluster(ra *raft.Raft) {
	ticker := time.NewTicker(500 * time.Millisecond)
	defer ticker.Stop()
	for range ticker.C {
		state := ra.State()
		if state == raft.Leader || state == raft.Follower {
			log.Println("Raft cluster is running")
			return
		}
		log.Println("Still waiting for the cluster to start...")
	}
}
// var gdb *gorm.DB = initalize.InitMysql("fusentest:XErSYmLELKMnf3Dh@tcp(110.41.19.98:3306)/fusentest")

// UserState is the replicated per-user record: when it was last updated and a
// hash of the user's password.
type UserState struct {
	UpdateAt time.Time
	UserId   int64
	PwdHash  uint64
}

// Encode gob-encodes the state and, when do is non-nil, hands the encoded
// bytes to it.
func (us *UserState) Encode(do func([]byte) error) error {
var buf bytes.Buffer
err := gob.NewEncoder(&buf).Encode(us)
if err != nil {
return err
}
if do != nil {
err := do(buf.Bytes())
if err != nil {
return err
}
}
return nil
}

// Decode restores the state from gob-encoded data.
func (us *UserState) Decode(data []byte) error {
buf := bytes.NewBuffer(data)
err := gob.NewDecoder(buf).Decode(us)
if err != nil {
return err
}
return nil
}
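
// exampleUserStateRoundTrip is an illustrative sketch (values are arbitrary)
// of the Encode/Decode pair above: Encode hands the gob bytes to a callback,
// which here immediately decodes them into a fresh value.
func exampleUserStateRoundTrip() error {
	in := &UserState{UpdateAt: time.Now(), UserId: 39, PwdHash: 12345}
	var out UserState
	return in.Encode(func(b []byte) error {
		return out.Decode(b)
	})
}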

// OperateType names the kind of operation carried in a Command.
type OperateType string

const (
	// OP_Update replaces the stored UserState for a key.
	OP_Update OperateType = "update"
)

// Command is the internal representation of a replicated operation; it is
// gob-encoded before being written to the raft log.
type Command struct {
	Op    OperateType
	Key   int64
	Value *UserState
}

// Encode gob-encodes the command and, when do is non-nil, hands the encoded
// bytes to it.
func (cmd *Command) Encode(do func(buf []byte) error) error {
var buf bytes.Buffer
err := gob.NewEncoder(&buf).Encode(cmd)
if err != nil {
return err
}
if do != nil {
err := do(buf.Bytes())
if err != nil {
return err
}
}
return nil
}

// Decode restores the command from gob-encoded data.
func (cmd *Command) Decode(sbuf []byte) error {
	var buf = bytes.NewBuffer(sbuf)
	err := gob.NewDecoder(buf).Decode(cmd)
	if err != nil {
		return err
	}
	return nil
}
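
// exampleProposeUpdate sketches the write path under the assumption that
// StateCluster's raft FSM (defined elsewhere in this package) decodes Command
// from applied log entries: encode an update and submit it through the raft
// log so every replica applies the same change.
func exampleProposeUpdate(fsm *StateCluster, us *UserState) error {
	cmd := &Command{Op: OP_Update, Key: us.UserId, Value: us}
	return cmd.Encode(func(b []byte) error {
		// Apply is hashicorp/raft's entry point for new log entries; it
		// succeeds only on the current leader
		return fsm.ra.Apply(b, 10*time.Second).Error()
	})
}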