package server

import (
	"bytes"
	"fmt"
	"os"
	"runtime"
	"runtime/debug"
	"strings"
	"sync"
	"time"

	"github.com/siddontang/go/sync2"
)
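// info aggregates the server and replication statistics reported by the
// Dump methods below.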
type info struct {
	sync.Mutex

	app *App

	Server struct {
		OS        string
		ProcessId int
	}

	Replication struct {
		PubLogNum          sync2.AtomicInt64
		PubLogAckNum       sync2.AtomicInt64
		PubLogTotalAckTime sync2.AtomicDuration

		MasterLastLogID sync2.AtomicUint64
	}
}
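// newInfo builds the statistics holder for app, recording the host OS and
// the process id.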
func newInfo(app *App) (i *info, err error) {
	i = new(info)

	i.app = app

	i.Server.OS = runtime.GOOS
	i.Server.ProcessId = os.Getpid()

	return i, nil
}
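// Close releases resources held by info; it is currently a no-op.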
func (i *info) Close() {
}
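// getMemoryHuman formats a byte count with a G, M or K suffix (three decimal
// places), falling back to the plain number for small values. GB, MB and KB
// are package-level constants defined elsewhere.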
func getMemoryHuman(m uint64) string {
	if m > GB {
		return fmt.Sprintf("%0.3fG", float64(m)/float64(GB))
	} else if m > MB {
		return fmt.Sprintf("%0.3fM", float64(m)/float64(MB))
	} else if m > KB {
		return fmt.Sprintf("%0.3fK", float64(m)/float64(KB))
	} else {
		return fmt.Sprintf("%d", m)
	}
}
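// Dump renders the named info section ("server", "mem", "gc", "store" or
// "replication") as key:value lines. An empty section name dumps every
// section; an unknown name produces only its header line.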
func (i *info) Dump(section string) []byte {
	buf := &bytes.Buffer{}
	switch strings.ToLower(section) {
	case "":
		i.dumpAll(buf)
	case "server":
		i.dumpServer(buf)
	case "mem":
		i.dumpMem(buf)
	case "gc":
		i.dumpGC(buf)
	case "store":
		i.dumpStore(buf)
	case "replication":
		i.dumpReplication(buf)
	default:
		buf.WriteString(fmt.Sprintf("# %s\r\n", section))
	}

	return buf.Bytes()
}
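// infoPair is a single key/value entry in the dump output.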
type infoPair struct {
	Key   string
	Value interface{}
}
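// dumpAll writes every section, separated by Delims (defined elsewhere in
// the package).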
func (i *info) dumpAll(buf *bytes.Buffer) {
	i.dumpServer(buf)
	buf.Write(Delims)
	i.dumpStore(buf)
	buf.Write(Delims)
	i.dumpMem(buf)
	buf.Write(Delims)
	i.dumpGC(buf)
	buf.Write(Delims)
	i.dumpReplication(buf)
}
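// dumpServer reports process-level facts: OS, process id, listen addresses,
// the readonly flag and goroutine/cgo/client counters.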
func (i *info) dumpServer(buf *bytes.Buffer) {
	buf.WriteString("# Server\r\n")

	i.dumpPairs(buf, infoPair{"os", i.Server.OS},
		infoPair{"process_id", i.Server.ProcessId},
		infoPair{"addr", i.app.cfg.Addr},
		infoPair{"http_addr", i.app.cfg.HttpAddr},
		infoPair{"readonly", i.app.cfg.Readonly},
		infoPair{"goroutine_num", runtime.NumGoroutine()},
		infoPair{"cgo_call_num", runtime.NumCgoCall()},
		infoPair{"resp_client_num", i.app.respClientNum()},
	)
}
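// dumpMem reports allocator and heap counters taken from runtime.MemStats.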
func (i *info) dumpMem(buf *bytes.Buffer) {
	buf.WriteString("# Mem\r\n")

	var mem runtime.MemStats
	runtime.ReadMemStats(&mem)

	i.dumpPairs(buf, infoPair{"mem_alloc", getMemoryHuman(mem.Alloc)},
		infoPair{"mem_sys", getMemoryHuman(mem.Sys)},
		infoPair{"mem_lookups", getMemoryHuman(mem.Lookups)},
		infoPair{"mem_mallocs", getMemoryHuman(mem.Mallocs)},
		infoPair{"mem_frees", getMemoryHuman(mem.Frees)},
		infoPair{"mem_total", getMemoryHuman(mem.TotalAlloc)},
		infoPair{"mem_heap_alloc", getMemoryHuman(mem.HeapAlloc)},
		infoPair{"mem_heap_sys", getMemoryHuman(mem.HeapSys)},
		infoPair{"mem_heap_idle", getMemoryHuman(mem.HeapIdle)},
		infoPair{"mem_heap_inuse", getMemoryHuman(mem.HeapInuse)},
		infoPair{"mem_heap_released", getMemoryHuman(mem.HeapReleased)},
		infoPair{"mem_heap_objects", mem.HeapObjects},
	)
}

const (
	gcTimeFormat = "2006/01/02 15:04:05.000"
)
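// dumpGC reports garbage collection statistics from runtime/debug, including
// the last five pause durations.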
func (i *info) dumpGC(buf *bytes.Buffer) {
	buf.WriteString("# GC\r\n")

	count := 5

	var st debug.GCStats
	st.Pause = make([]time.Duration, count)
	// st.PauseQuantiles = make([]time.Duration, count)
	debug.ReadGCStats(&st)

	h := make([]string, 0, count)

	for i := 0; i < count && i < len(st.Pause); i++ {
		h = append(h, st.Pause[i].String())
	}

	i.dumpPairs(buf, infoPair{"gc_last_time", st.LastGC.Format(gcTimeFormat)},
		infoPair{"gc_num", st.NumGC},
		infoPair{"gc_pause_total", st.PauseTotal.String()},
		infoPair{"gc_pause_history", strings.Join(h, ",")},
	)
}
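// dumpStore reports counters from the underlying store (i.app.ldb).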
func (i *info) dumpStore(buf *bytes.Buffer) {
	buf.WriteString("# Store\r\n")

	s := i.app.ldb.StoreStat()

	// getNum := s.GetNum.Get()
	// getTotalTime := s.GetTotalTime.Get()

	// gt := int64(0)
	// if getNum > 0 {
	// 	gt = getTotalTime.Nanoseconds() / (getNum * 1e3)
	// }

	// commitNum := s.BatchCommitNum.Get()
	// commitTotalTime := s.BatchCommitTotalTime.Get()

	// ct := int64(0)
	// if commitNum > 0 {
	// 	ct = commitTotalTime.Nanoseconds() / (commitNum * 1e3)
	// }

	i.dumpPairs(buf, infoPair{"name", i.app.cfg.DBName},
		infoPair{"get", s.GetNum},
		infoPair{"get_missing", s.GetMissingNum},
		infoPair{"put", s.PutNum},
		infoPair{"delete", s.DeleteNum},
		infoPair{"get_total_time", s.GetTotalTime.Get().String()},
		infoPair{"iter", s.IterNum},
		infoPair{"iter_seek", s.IterSeekNum},
		infoPair{"iter_close", s.IterCloseNum},
		infoPair{"batch_commit", s.BatchCommitNum},
		infoPair{"batch_commit_total_time", s.BatchCommitTotalTime.Get().String()},
	)
}
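// dumpReplication reports the replication role, publish-log counters, the
// connected slaves and log ids. When running as a slave it also emits the
// Redis-style fields (master_link_status, slave_priority, slave_repl_offset)
// consumed by external failover tools.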
func (i *info) dumpReplication(buf *bytes.Buffer) {
	buf.WriteString("# Replication\r\n")

	p := []infoPair{}
	i.app.slock.Lock()
	slaves := make([]string, 0, len(i.app.slaves))
	for _, s := range i.app.slaves {
		slaves = append(slaves, s.slaveListeningAddr)
	}
	i.app.slock.Unlock()

	i.app.m.Lock()
	slaveof := i.app.cfg.SlaveOf
	i.app.m.Unlock()

	isSlave := len(slaveof) > 0

	if isSlave {
		p = append(p, infoPair{"role", "slave"})
	} else {
		p = append(p, infoPair{"role", "master"})
	}

	num := i.Replication.PubLogNum.Get()
	p = append(p, infoPair{"pub_log_num", num})

	ackNum := i.Replication.PubLogAckNum.Get()
	totalTime := i.Replication.PubLogTotalAckTime.Get().Nanoseconds() / 1e6
	if ackNum != 0 {
		p = append(p, infoPair{"pub_log_ack_per_time", totalTime / ackNum})
	} else {
		p = append(p, infoPair{"pub_log_ack_per_time", 0})
	}

	p = append(p, infoPair{"slaveof", slaveof})

	if len(slaves) > 0 {
		p = append(p, infoPair{"slaves", strings.Join(slaves, ",")})
	}

	s, _ := i.app.ldb.ReplicationStat()
	if s != nil {
		p = append(p, infoPair{"last_log_id", s.LastID})
		p = append(p, infoPair{"first_log_id", s.FirstID})
		p = append(p, infoPair{"commit_log_id", s.CommitID})
	} else {
		p = append(p, infoPair{"last_log_id", 0})
		p = append(p, infoPair{"first_log_id", 0})
		p = append(p, infoPair{"commit_log_id", 0})
	}

	p = append(p, infoPair{"master_last_log_id", i.Replication.MasterLastLogID.Get()})

	if isSlave {
		// add some redis slave replication info for outer failover service :-)
		state := i.app.m.state.Get()
		if state == replSyncState || state == replConnectedState {
			p = append(p, infoPair{"master_link_status", "up"})
		} else {
			p = append(p, infoPair{"master_link_status", "down"})
		}

		// here, all the slaves have same priority now
		p = append(p, infoPair{"slave_priority", 100})
		if s != nil {
			if s.LastID > 0 {
				p = append(p, infoPair{"slave_repl_offset", s.LastID})
			} else {
				p = append(p, infoPair{"slave_repl_offset", s.CommitID})
			}
		} else {
			p = append(p, infoPair{"slave_repl_offset", 0})
		}
	}

	i.dumpPairs(buf, p...)
}
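// dumpPairs writes each pair as "key:value" terminated by CRLF.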
func (i *info) dumpPairs(buf *bytes.Buffer, pairs ...infoPair) {
	for _, v := range pairs {
		buf.WriteString(fmt.Sprintf("%s:%v\r\n", v.Key, v.Value))
	}
}