From 3ea0f08d2b77a2d95cc4fc12988876a6951a26bd Mon Sep 17 00:00:00 2001
From: wangyao
Date: Fri, 4 Nov 2022 19:30:30 +0800
Subject: [PATCH 1/7] improve: performance

---
 backend/balancer.go        |  2 +
 backend/db.go              | 95 ++++++++++++++++++++-------------
 backend/node.go            |  2 -
 cmd/he3proxy/main.go       |  1 +
 proxy/server/conn_pgsql.go |  8 ++--
 proxy/server/server.go     | 19 +++++++-
 6 files changed, 76 insertions(+), 51 deletions(-)

diff --git a/backend/balancer.go b/backend/balancer.go
index bb7d281..9b761ec 100644
--- a/backend/balancer.go
+++ b/backend/balancer.go
@@ -83,6 +83,8 @@ func (n *Node) InitBalancer() {
 }
 
 func (n *Node) getNextSlaveByWeight() (*DB, error) {
+    n.Lock()
+    defer n.Unlock()
     var index int
     queueLen := len(n.RoundRobinQ)
     if queueLen == 0 {
diff --git a/backend/db.go b/backend/db.go
index 28e5b59..39a7bdb 100644
--- a/backend/db.go
+++ b/backend/db.go
@@ -353,6 +353,52 @@ func (db *DB) PopConn() (*Conn, error) {
     return co, nil
 }
 
+func (db *DB) InitConnPoolPg(dbname string, dbuser string) (err error) {
+    // check whether the connection pool is disabled; it is enabled by default
+    if os.Getenv(config.ConnPoolSwitch) == "false" {
+        return
+    }
+    var cacheConns chan *Conn
+    db.Lock()
+    defer db.Unlock()
+    db.db = dbname
+    db.user = dbuser
+    _, flag := db.cacheConnsMap.Load(dbuser + dbname)
+    if !flag {
+        if db.IsExceedMaxConns() {
+            err = errors.ErrConnIsFull
+            return err
+        }
+        // check whether the maximum number of connection pools is exceeded
+        if db.cacheCountNum >= db.maxPoolNum {
+            err = errors.ErrMaxPoolIsFull
+            return err
+        }
+        // InitConnNum comes from the config file; the default value is 16
+        cacheConns = make(chan *Conn, db.InitConnNum)
+        atomic.StoreInt32(&(db.state), Unknown)
+
+        // creating the connections in a loop is slow, so a goroutine was used at first
+        // 2022.06.08: tests showed that creating them in a goroutine makes the pool size unstable under concurrency, so it is disabled for now and the connections are created synchronously, which makes the first query slower
+        //go func() {
+        // initialize the connection pool
+        for i := 0; i < db.InitConnNum; i++ {
+            conn, err := db.newConn(dbuser)
+
+            if err != nil {
+                db.Close()
+            }
+
+            cacheConns <- conn
+            atomic.AddInt64(&db.pushConnCount, 1)
+            atomic.AddInt64(&db.currConnCount, 1)
+        }
+        db.cacheConnsMap.Store(dbuser+dbname, cacheConns)
+        atomic.AddInt32(&db.cacheCountNum, 1)
+    }
+    return nil
+}
+
 func (db *DB) PopConnPg(dbname string, dbuser string) (*Conn, error) {
     var co *Conn
     var err error
@@ -370,54 +416,15 @@ func (db *DB) PopConnPg(dbname string, dbuser string) (*Conn, error) {
         }
     } else {
         var cacheConns chan *Conn
-        db.Lock()
         val, flag := db.cacheConnsMap.Load(dbuser+dbname)
-        if !flag {
-            if db.IsExceedMaxConns() {
-                err = errors.ErrConnIsFull
-                return nil, err
-            }
-            // check whether the maximum number of connection pools is exceeded
-            if db.cacheCountNum >= db.maxPoolNum {
-                err = errors.ErrMaxPoolIsFull
+        if flag {
+            cacheConns = val.(chan *Conn)
+        }else {
+            err = db.InitConnPoolPg(dbname, dbuser)
+            if err!=nil {
                 return nil, err
             }
-            // InitConnNum comes from the config file; the default value is 16
-            cacheConns = make(chan *Conn, db.InitConnNum)
-            atomic.StoreInt32(&(db.state), Unknown)
-
-            // creating the connections in a loop is slow, so a goroutine was used at first
-            // 2022.06.08: tests showed that creating them in a goroutine makes the pool size unstable under concurrency, so it is disabled for now and the connections are created synchronously, which makes the first query slower
-            //go func() {
-            // initialize the connection pool
-            for i := 0; i < db.InitConnNum; i++ {
-                conn, err := db.newConn(dbuser)
-
-                if err != nil {
-                    db.Close()
-                }
-
-                cacheConns <- conn
-                atomic.AddInt64(&db.pushConnCount, 1)
-                atomic.AddInt64(&db.currConnCount, 1)
-            }
-            db.cacheConnsMap.Store(dbuser+dbname, cacheConns)
-            atomic.AddInt32(&db.cacheCountNum, 1)
-            //}()
-
-            // return a connection
-            //co, err = db.newConn(dbuser)
-            //if err != nil {
-            //    db.Close()
-            //    return nil, err
-            //}
-            //atomic.AddInt64(&db.pushConnCount, 1)
-            //atomic.AddInt64(&db.currConnCount, 1)
-        }else {
-            cacheConns = val.(chan *Conn)
         }
-        db.Unlock()
-
         co = db.GetConnFromCache(cacheConns)
         if co == nil {
             golog.Warn("db", "PopConnPg", "conn is nil", 0)
diff --git a/backend/node.go b/backend/node.go
index 2ef1b5a..39a17e8 100644
--- a/backend/node.go
+++ b/backend/node.go
@@ -201,10 +201,8 @@ func (n *Node) GetSlaveConn() (*BackendConn, error) {
 }
 
 func (n *Node) GetSlaveConnPg(dbname string, dbuser string, tablename string) (*BackendConn, error) {
-    n.Lock()
     n.NodeCacheKey = dbname + "_" + tablename
     db, err := n.GetNextSlave()
-    n.Unlock()
     if err != nil {
         return nil, err
     }
diff --git a/cmd/he3proxy/main.go b/cmd/he3proxy/main.go
index 6ba0e73..781cdd8 100644
--- a/cmd/he3proxy/main.go
+++ b/cmd/he3proxy/main.go
@@ -74,6 +74,7 @@ const banner string = `
 
 func main() {
     fmt.Print(banner)
+    fmt.Printf("set runtime GOMAXPROCS: %d \n", runtime.NumCPU())
     runtime.GOMAXPROCS(runtime.NumCPU())
     // Get parameters output in CMD
     flag.Parse()
diff --git a/proxy/server/conn_pgsql.go b/proxy/server/conn_pgsql.go
index 4e673fa..5d82ba2 100644
--- a/proxy/server/conn_pgsql.go
+++ b/proxy/server/conn_pgsql.go
@@ -161,7 +161,7 @@ func (c *ClientConn) RunPg(ctx context.Context) {
             golog.Error(moduleName, "RunPg", err.Error(), c.connectionId,
             )
-            c.writePgErr(ctx, err)
+            c.writePgErr(ctx, "22000", err.Error())
             if err == mysql.ErrBadConn {
                 c.Close()
             }
@@ -1011,13 +1011,13 @@ func (cc *ClientConn) writeAuthenticationOK(ctx context.Context) error {
 }
 
 // writePgErr
-func (cc *ClientConn) writePgErr(ctx context.Context, err error) error {
+func (cc *ClientConn) writePgErr(ctx context.Context, code string, errmsg string) error {
     errorResponse := &pgproto3.ErrorResponse{
         Severity:            "ERROR",
         SeverityUnlocalized: "",
         //TODO The error needs to be returned according to the error code.
-        Code:    "42P04",
-        Message: err.Error(),
+        Code:    code,
+        Message: errmsg,
         Detail:  "",
         Hint:    "",
     }
diff --git a/proxy/server/server.go b/proxy/server/server.go
index eec9af4..7bdae21 100644
--- a/proxy/server/server.go
+++ b/proxy/server/server.go
@@ -419,7 +419,7 @@ func (s *Server) onConn(c net.Conn, dbType string) {
 
         if err := conn.handshake(ctx); err != nil {
             golog.Error("server", "onConn", err.Error(), 0)
-            conn.writePgErr(ctx, err)
+            conn.writePgErr(ctx, "08006", err.Error())
             conn.Close()
             return
         }
@@ -430,6 +430,23 @@ func (s *Server) onConn(c net.Conn, dbType string) {
             conn.schema = s.GetSchema(config.DefaultHe3User)
         }
 
+        // init all db-node connection pool
+        masterNode := s.nodes["node1"].Master
+        if err := masterNode.InitConnPoolPg(conn.db, conn.user); err != nil {
+            golog.Error("server", "onConn", fmt.Sprintf("masterNode %s InitConnPoolPg err: %s", masterNode.Addr(), err.Error()), 0)
+            conn.writePgErr(ctx, "53300", err.Error())
+            conn.Close()
+            return
+        }
+        for _, db := range s.nodes["node1"].Slave {
+            if err := db.InitConnPoolPg(conn.db, conn.user); err != nil {
+                golog.Error("server", "onConn", fmt.Sprintf("Slave %s InitConnPoolPg err: %s", db.Addr(), err.Error()), 0)
+                conn.writePgErr(ctx, "53300", err.Error())
+                conn.Close()
+                return
+            }
+        }
+
         conn.RunPg(ctx)
     }
     //else {
-- 
Gitee

From e22d3465083c44c16f1f292a4f4e5f10a6b09967 Mon Sep 17 00:00:00 2001
From: wangyao
Date: Thu, 10 Nov 2022 12:15:55 +0800
Subject: [PATCH 2/7] improve: performance

---
 backend/backend_conn.go         |  19 ++-
 backend/balancer.go             |  42 +++--
 backend/db.go                   |  41 +++--
 backend/node.go                 |  53 ++++--
 backend/pg_prometheus_metric.go |  96 +++++++----
 cmd/he3proxy/main.go            |   9 +-
 config/config.go                |   6 +
 core/errors/errors.go           |   4 -
 go.mod                          |   8 +-
 go.sum                          |  16 ++
 mysql/const.go                  |   2 +
 proxy/server/conn_pgsql.go      | 288 +++++++++++++++++++++++++-------
 proxy/server/conn_preshard.go   |  91 +++++++++-
 proxy/server/server.go          |  17 +-
proxy/server/util.go | 10 ++ 15 files changed, 556 insertions(+), 146 deletions(-) diff --git a/backend/backend_conn.go b/backend/backend_conn.go index 8cc8fdf..f34e2f8 100644 --- a/backend/backend_conn.go +++ b/backend/backend_conn.go @@ -145,13 +145,16 @@ func (c *Conn) ReConnectPg() error { if c.db == "" { c.db = "postgres" } - urlExample := fmt.Sprintf("postgres://%s:%s@%s/%s", c.user, c.password, c.addr, c.db) + var str = []string{"postgres://",c.user,":", c.password, "@", c.addr, "/", c.db} + urlExample := strings.Join(str, "") + //urlExample := fmt.Sprintf("postgres://%s:%s@%s/%s", c.user, c.password, c.addr, c.db) conn, err := pgx.Connect(context.Background(), urlExample) if err != nil { golog.Error("backend conn", "ReConnectPg", "pgx.Connect", 0, "urlExample", urlExample, "error", err.Error()) return err } c.ConnPg = *conn + c.conn = c.ConnPg.PgConn().Conn().(*net.TCPConn) //SetNoDelay controls whether the operating system should delay packet transmission // in hopes of sending fewer packets (Nagle's algorithm). @@ -159,9 +162,9 @@ func (c *Conn) ReConnectPg() error { // meaning that data is sent as soon as possible after a Write. //I set this option false. c.ConnPg.PgConn().Conn().(*net.TCPConn).SetNoDelay(false) - c.ConnPg.PgConn().Conn().(*net.TCPConn).SetKeepAlive(true) + //c.ConnPg.PgConn().Conn().(*net.TCPConn).SetKeepAlive(true) - c.pkg = mysql.NewPacketIO(c.ConnPg.PgConn().Conn()) + c.pkg = mysql.NewPacketIO(c.ConnPg.PgConn().Conn().(*net.TCPConn)) //if err := c.ConnPg.Ping(context.Background()); err != nil { // golog.Error("backend conn", "ReConnectPg", "pgx.Ping", 0, "urlExample", urlExample, "error", err.Error()) @@ -403,7 +406,7 @@ func (c *Conn) Ping() error { } } else { if err := c.ConnPg.Ping(context.Background()); err != nil { - golog.Error("backend conn", "Ping", "ConnPg.Ping", 0, "error", err.Error()) + golog.Error("backend conn", "Ping", "ConnPg.Ping", 0, "error", err.Error()) c.ConnPg.Close(context.Background()) return err } @@ -789,8 +792,7 @@ func (c *Conn) WritePgPacket(data []byte) error { return err } -func (c *Conn) ReadPgPacket() ([]byte, error) { - header := make([]byte, 5) +func (c *Conn) ReadPgPacket(header []byte) ([]byte, error) { if _, err := io.ReadFull(c.pkg.Rb, header); err != nil { return nil, err } @@ -804,3 +806,8 @@ func (c *Conn) ReadPgPacket() ([]byte, error) { } return append(header, msg...), nil } + +// pg msg streaming can not through this way to get all msg +func (c *Conn) ReadPgAllPacket() ([]byte, error) { + return io.ReadAll(c.pkg.Rb) +} diff --git a/backend/balancer.go b/backend/balancer.go index 9b761ec..cc54cdc 100644 --- a/backend/balancer.go +++ b/backend/balancer.go @@ -17,9 +17,11 @@ package backend import ( "fmt" "math/rand" + "os" "strings" "time" + "gitee.com/he3db/he3proxy/config" "gitee.com/he3db/he3proxy/core/errors" "gitee.com/he3db/he3proxy/core/golog" ) @@ -113,19 +115,25 @@ func (n *Node) GetNextSlave() (*DB, error) { // Determine whether the LSN number meets the requirements // first get current db_table's LSN, if not exist will use node's LSN var masterLsn uint64 - golog.Debug("balancer", "GetNextSlave", - fmt.Sprintf("Node Cache Key: [%s]", n.NodeCacheKey), 0) + if golog.GetLevel() <= 1 { + golog.Debug("balancer", "GetNextSlave", + fmt.Sprintf("Node Cache Key: [%s]", n.NodeCacheKey), 0) + } val, flag := n.NodeLsn.Load(n.NodeCacheKey) if !flag { val, _ = n.NodeLsn.Load(strings.Split(n.Master.addr, ":")[0]) masterLsn = val.(uint64) // parse string LSN to uint64 - golog.Debug("balancer", "GetNextSlave", - 
fmt.Sprintf("master node addr: [%s], 10hex lsn: [%d]", n.Master.addr, masterLsn), 0) + if golog.GetLevel() <= 1 { + golog.Debug("balancer", "GetNextSlave", + fmt.Sprintf("master node addr: [%s], 10hex lsn: [%d]", n.Master.addr, masterLsn), 0) + } } else { masterLsn = val.(uint64) - golog.Debug("balancer", "GetNextSlave", - fmt.Sprintf("current use table [%s] latest LSN, 10hex lsn: [%d]", n.NodeCacheKey, masterLsn), 0) + if golog.GetLevel() <= 1 { + golog.Debug("balancer", "GetNextSlave", + fmt.Sprintf("current use table [%s] latest LSN, 10hex lsn: [%d]", n.NodeCacheKey, masterLsn), 0) + } } // save standby node index where LSN meet the conditions lsnSlice := make([]int, 0, 0) @@ -133,8 +141,10 @@ func (n *Node) GetNextSlave() (*DB, error) { val, flag = n.NodeLsn.Load(strings.Split(db.addr, ":")[0]) if flag { parseUint := val.(uint64) - golog.Debug("balancer", "GetNextSlave", - fmt.Sprintf("slave node index: [%d], addr: [%s], 10hex lsn: [%d]", i, db.addr, parseUint), 0) + if golog.GetLevel() <= 1 { + golog.Debug("balancer", "GetNextSlave", + fmt.Sprintf("slave node index: [%d], addr: [%s], 10hex lsn: [%d]", i, db.addr, parseUint), 0) + } if parseUint >= masterLsn { lsnSlice = append(lsnSlice, i) } @@ -178,6 +188,10 @@ func (n *Node) GetNextSlave() (*DB, error) { } return n.Slave[index], nil } else if n.Cfg.LoadBalanceMode == "cache" { + // if use simple parse will use lex analysis, and can not get table name, so change to weight mode + if os.Getenv(config.SimpleParseFlag) == "true" { + return n.getNextSlaveByWeight() + } // "cache" means switch cached node first // get table cache nodes indexs := n.getNextSlaveByCache() @@ -200,19 +214,25 @@ func (n *Node) getNextSlaveByCache() []int { val, flag := n.NodeCache.Load(n.NodeCacheKey) if flag { tableCachedNodes := val.([]string) - golog.Debug("balancer", "GetNextSlave", fmt.Sprintf("table cached key:%s, nodes: %s", n.NodeCacheKey, tableCachedNodes), 0) + if golog.GetLevel() <= 1 { + golog.Debug("balancer", "GetNextSlave", fmt.Sprintf("table cached key:%s, nodes: %s", n.NodeCacheKey, tableCachedNodes), 0) + } if tableCachedNodes != nil && len(tableCachedNodes) > 0 { for _, cacheNode := range tableCachedNodes { for i, db := range n.Slave { //if cacheNode == strings.Split(db.addr, ":")[0] { // cacheNode ip:port if cacheNode == db.addr { - golog.Debug("balancer", "GetNextSlave", fmt.Sprintf("cacheNode matched addr: %s", cacheNode), 0) + if golog.GetLevel() <= 1 { + golog.Debug("balancer", "GetNextSlave", fmt.Sprintf("cacheNode matched addr: %s", cacheNode), 0) + } indexs = append(indexs, i) } } } - golog.Debug("balancer", "GetNextSlave", fmt.Sprintf("cacheNode matched indexs: %v", indexs), 0) + if golog.GetLevel() <= 1 { + golog.Debug("balancer", "GetNextSlave", fmt.Sprintf("cacheNode matched indexs: %v", indexs), 0) + } } } return indexs diff --git a/backend/db.go b/backend/db.go index 39a7bdb..056528b 100644 --- a/backend/db.go +++ b/backend/db.go @@ -16,11 +16,14 @@ package backend import ( "context" + "fmt" "os" "sync" "sync/atomic" "time" + timecost "github.com/dablelv/go-huge-util" + "gitee.com/he3db/he3proxy/config" "gitee.com/he3db/he3proxy/core/golog" @@ -63,7 +66,7 @@ type DB struct { cacheConnsMap sync.Map cacheCountNum int32 currConnCount int64 //当前总链接数 - maxPoolNum int32 //支持的最大db链接池 + maxPoolNum int32 //支持的最大db链接池 } func Open(addr string, user string, password string, dbName string, maxConnNum int) (*DB, error) { @@ -256,10 +259,10 @@ func (db *DB) closeConn(co *Conn) error { // If the user link is not cached in the connection pool, 
it will be released directly. // If the link is cached, the current link will be released and a new link will be generated to join the pool. // The number of connections in the maintenance pool will remain unchanged - val, flag := db.cacheConnsMap.Load(db.user+db.db) + val, flag := db.cacheConnsMap.Load(db.user + db.db) if !flag { atomic.AddInt64(&db.currConnCount, -1) - }else { + } else { conns := val.(chan *Conn) if len(conns) == db.InitConnNum { atomic.AddInt64(&db.currConnCount, -1) @@ -329,6 +332,14 @@ func (db *DB) tryReuse(co *Conn) error { } func (db *DB) PopConn() (*Conn, error) { + if os.Getenv(config.TimeCostFlag) == "true" { + c := timecost.TimeCost() + defer func() { + if golog.GetLevel() <= 3 { + golog.Warn("TIMECOST", "timecost", fmt.Sprintf("PopConn time cost is %v", c()), 0) + } + }() + } var co *Conn var err error @@ -363,7 +374,7 @@ func (db *DB) InitConnPoolPg(dbname string, dbuser string) (err error) { defer db.Unlock() db.db = dbname db.user = dbuser - _, flag := db.cacheConnsMap.Load(dbuser+dbname) + _, flag := db.cacheConnsMap.Load(dbuser + dbname) if !flag { if db.IsExceedMaxConns() { err = errors.ErrConnIsFull @@ -400,6 +411,12 @@ func (db *DB) InitConnPoolPg(dbname string, dbuser string) (err error) { } func (db *DB) PopConnPg(dbname string, dbuser string) (*Conn, error) { + if os.Getenv(config.TimeCostFlag) == "true" { + c := timecost.TimeCost() + defer func() { + golog.Warn("TIMECOST", "timecost", fmt.Sprintf("handleQueryPg time cost is %v", c()), 0) + }() + } var co *Conn var err error @@ -416,12 +433,12 @@ func (db *DB) PopConnPg(dbname string, dbuser string) (*Conn, error) { } } else { var cacheConns chan *Conn - val, flag := db.cacheConnsMap.Load(dbuser+dbname) + val, flag := db.cacheConnsMap.Load(dbuser + dbname) if flag { cacheConns = val.(chan *Conn) - }else { + } else { err = db.InitConnPoolPg(dbname, dbuser) - if err!=nil { + if err != nil { return nil, err } } @@ -524,7 +541,7 @@ func (db *DB) PushConnForExtendedProtocol(co *Conn, err error) { if co == nil { return } - val, flag := db.cacheConnsMap.Load(db.user+db.db) + val, flag := db.cacheConnsMap.Load(db.user + db.db) // 因后端链接未销毁 到时扩展协议时报错: prepared statement "lrupsc_1_0" already exists (SQLSTATE 42P05) // 如果成功创建了一个命名的预备语句对象,那么它将持续到当前会话结束, 除非被明确地删除 (暂未实现 需要采用close命令关闭),现在是直接关闭了链接 // http://www.postgres.cn/docs/14/protocol-message-formats.html @@ -576,18 +593,18 @@ func (db *DB) PushConn(co *Conn, err error) { if co == nil { return } - val, flag := db.cacheConnsMap.Load(db.user+db.db) + val, flag := db.cacheConnsMap.Load(db.user + db.db) if !flag || err != nil { co.Close() atomic.AddInt64(&db.currConnCount, -1) return - }else { + } else { conns := val.(chan *Conn) if len(conns) == db.InitConnNum { co.Close() atomic.AddInt64(&db.currConnCount, -1) return - }else { + } else { co.pushTimestamp = time.Now().Unix() select { case conns <- co: @@ -605,7 +622,7 @@ func (db *DB) PushConn(co *Conn, err error) { type BackendConn struct { *Conn db *DB - IsInTransaction bool // 是否在事务中 + IsInTransaction bool // 是否在事务中 } func (p *BackendConn) Close() { diff --git a/backend/node.go b/backend/node.go index 39a17e8..ab5fe7c 100644 --- a/backend/node.go +++ b/backend/node.go @@ -463,8 +463,11 @@ func (n *Node) getLsnAndCacheMetadata(conn *Conn, masterStr string) { if conn == nil || conn.ConnPg.Ping(context.Background()) != nil { n.Master, err = n.OpenDB(masterStr) if err != nil { - golog.Error("node", "getLsnAndCacheMetadata", fmt.Sprintf("open master db err : %s, addr: %s", + if golog.GetLevel() <= 4 { + 
golog.Error("node", "getLsnAndCacheMetadata", fmt.Sprintf("open master db err : %s, addr: %s", err.Error(), masterStr), 0) + } + return } conn = n.Master.checkConn @@ -473,23 +476,33 @@ func (n *Node) getLsnAndCacheMetadata(conn *Conn, masterStr string) { // init read-only node's LSN rows, er := conn.ConnPg.Query(context.Background(), "select client_addr::text, replay_lsn from pg_stat_replication;") if er != nil { - golog.Error("node", "ParseMaster", fmt.Sprintf("Query pg_stat_replication failed: %s", er.Error()), 0) + if golog.GetLevel() <= 4 { + golog.Error("node", "ParseMaster", fmt.Sprintf("Query pg_stat_replication failed: %s", er.Error()), 0) + } } var addr string var lsn string for rows.Next() { er = rows.Scan(&addr, &lsn) if er != nil { - golog.Error("node", "ParseMaster", fmt.Sprintf("scan client_addr,replay_lsn err : %s", er.Error()), 0) + if golog.GetLevel() <= 4 { + golog.Error("node", "ParseMaster", fmt.Sprintf("scan client_addr,replay_lsn err : %s", er.Error()), 0) + } + break } if val, err := pgLsnInInternal(lsn); err != nil { - golog.Error("node", "ParseMaster", fmt.Sprintf("slave node parse LSN 16hex to 10hex err : %s, addr: %s", - err.Error(), addr), 0) + if golog.GetLevel() <= 4 { + golog.Error("node", "ParseMaster", fmt.Sprintf("slave node parse LSN 16hex to 10hex err : %s, addr: %s", + err.Error(), addr), 0) + } + } else { n.NodeLsn.Store(strings.Split(addr, "/")[0], val) - golog.Trace("node", "ParseMaster", - fmt.Sprintf("slave node parse LSN, addr: %s, lsn 16hex: %s, lsn 10hex: %d ", addr, lsn, val), 0) + if golog.GetLevel() <= 0 { + golog.Trace("node", "ParseMaster", + fmt.Sprintf("slave node parse LSN, addr: %s, lsn 16hex: %s, lsn 10hex: %d ", addr, lsn, val), 0) + } } } // init master node's LSN @@ -498,18 +511,24 @@ func (n *Node) getLsnAndCacheMetadata(conn *Conn, masterStr string) { golog.Error("node", "ParseMaster", "scan pg_current_wal_lsn err !"+er.Error(), 0) } else { if val, err := pgLsnInInternal(lsn); err != nil { - golog.Error("node", "ParseMaster", fmt.Sprintf("master node parse LSN 16hex to 10hex err : %s, addr: %s", - err.Error(), masterStr), 0) + if golog.GetLevel() <= 4 { + golog.Error("node", "ParseMaster", fmt.Sprintf("master node parse LSN 16hex to 10hex err : %s, addr: %s", + err.Error(), masterStr), 0) + } } else { n.NodeLsn.Store(strings.Split(masterStr, ":")[0], val) - golog.Trace("node", "ParseMaster", - fmt.Sprintf("master node parse LSN, addr: %s, lsn 16hex: %s , lsn 10hex: %d", masterStr, lsn, val), 0) + if golog.GetLevel() <= 0 { + golog.Trace("node", "ParseMaster", + fmt.Sprintf("master node parse LSN, addr: %s, lsn 16hex: %s , lsn 10hex: %d", masterStr, lsn, val), 0) + } } } // init metadata about table which node cached rows, er = conn.ConnPg.Query(context.Background(), "select datname, relname, clientaddr from pg_hot_data;") if er != nil { - golog.Error("node", "ParseMaster", fmt.Sprintf("Query pg_hot_data failed: %s", er.Error()), 0) + if golog.GetLevel() <= 4 { + golog.Error("node", "ParseMaster", fmt.Sprintf("Query pg_hot_data failed: %s", er.Error()), 0) + } } // Deal with `pg_hot_data` delete refresh, delete all data and restore n.NodeCache.Range(func(key, value interface{}) bool { @@ -522,10 +541,14 @@ func (n *Node) getLsnAndCacheMetadata(conn *Conn, masterStr string) { for rows.Next() { er = rows.Scan(&datname, &relname, &clientaddr) if er != nil { - golog.Error("node", "ParseMaster", fmt.Sprintf("scan datname, relname err : %s", er.Error()), 0) + if golog.GetLevel() <= 4 { + golog.Error("node", "ParseMaster", 
fmt.Sprintf("scan datname, relname err : %s", er.Error()), 0) + } break } - golog.Trace("node", "ParseMaster", fmt.Sprintf("datname: %s, relname: %s, client_addr: %s", datname, relname, clientaddr), 0) + if golog.GetLevel() <= 0 { + golog.Trace("node", "ParseMaster", fmt.Sprintf("datname: %s, relname: %s, client_addr: %s", datname, relname, clientaddr), 0) + } val, flag := n.NodeCache.Load(datname + "_" + relname) var nodeCacheVal []string if !flag { @@ -601,7 +624,7 @@ func (n *Node) ParseSlave(slaveStr string) error { } n.InitBalancer() - if strings.ToUpper(os.Getenv(config.MetricsFlag)) != "FALSE" && (n.Cfg.LoadBalanceMode == "metric" || n.Cfg.LoadBalanceMode == "lsn") { + if os.Getenv(config.MetricsFlag) != "false" && (n.Cfg.LoadBalanceMode == "metric" || n.Cfg.LoadBalanceMode == "lsn") { n.BestNodeIndexByMetric = -1 go n.saveBestNodeIndex() } diff --git a/backend/pg_prometheus_metric.go b/backend/pg_prometheus_metric.go index 03f8952..3dc3152 100644 --- a/backend/pg_prometheus_metric.go +++ b/backend/pg_prometheus_metric.go @@ -72,51 +72,75 @@ func CalculateAndChooseBestNode(cfg *config.NodeConfig, nodeLoad map[int]int) (i } for i := 0; i < len(cfg.MonitorPgNodes); i++ { - golog.Trace("pg prometheus metric", "CalculateAndChooseBestNode", - fmt.Sprintf("handle info with node: %s \n", cfg.MonitorPgNodes[i]), 0) + if golog.GetLevel() <= 0 { + golog.Trace("pg prometheus metric", "CalculateAndChooseBestNode", + fmt.Sprintf("handle info with node: %s \n", cfg.MonitorPgNodes[i]), 0) + } connNum, err := getCurrentConnections(pgPrometheusUrl, pgExporterSlice[i], cfg.PgExporterName) if err != nil { - golog.Error("pg prometheus metric", "CalculateAndChooseBestNode", - fmt.Sprintf("Get current connections failed: %s \n", err.Error()), 0) + if golog.GetLevel() <= 4 { + golog.Error("pg prometheus metric", "CalculateAndChooseBestNode", + fmt.Sprintf("Get current connections failed: %s \n", err.Error()), 0) + } return -1, nil, errors.New(fmt.Sprintf("Get current connections failed: %s \n", err.Error())) } - golog.Trace("pg prometheus metric", "CalculateAndChooseBestNode", - fmt.Sprintf("current connection num: %d \n", connNum), 0) + if golog.GetLevel() <= 0 { + golog.Trace("pg prometheus metric", "CalculateAndChooseBestNode", + fmt.Sprintf("current connection num: %d \n", connNum), 0) + } cpuIdle, err := getCpuIdle(pgPrometheusUrl, nodeExporterSlice[i], cfg.NodeExporterName, cfg.NodeCpuMode, cfg.TimeInterval) if err != nil { - golog.Error("pg prometheus metric", "CalculateAndChooseBestNode", - fmt.Sprintf("Get current cpu idle failed: %s \n", err.Error()), 0) + if golog.GetLevel() <= 4 { + golog.Error("pg prometheus metric", "CalculateAndChooseBestNode", + fmt.Sprintf("Get current cpu idle failed: %s \n", err.Error()), 0) + } + return -1, nil, errors.New(fmt.Sprintf("Get current cpu idle failed: %s \n", err.Error())) } - golog.Trace("pg prometheus metric", "CalculateAndChooseBestNode", - fmt.Sprintf("current cpu idle: %d \n", cpuIdle), 0) + if golog.GetLevel() <= 0 { + golog.Trace("pg prometheus metric", "CalculateAndChooseBestNode", + fmt.Sprintf("current cpu idle: %d \n", cpuIdle), 0) + } + memIdle, err := getMemIdle(pgPrometheusUrl, nodeExporterSlice[i], cfg.NodeExporterName) if err != nil { - golog.Error("pg prometheus metric", "CalculateAndChooseBestNode", - fmt.Sprintf("Get current mem idle failed: %s \n", err.Error()), 0) + if golog.GetLevel() <= 4 { + golog.Error("pg prometheus metric", "CalculateAndChooseBestNode", + fmt.Sprintf("Get current mem idle failed: %s \n", err.Error()), 0) + } + 
return -1, nil, errors.New(fmt.Sprintf("Get current mem idle failed: %s \n", err.Error())) } - golog.Trace("pg prometheus metric", "CalculateAndChooseBestNode", - fmt.Sprintf("current mem idle: %d \n", memIdle), 0) + if golog.GetLevel() <= 0 { + golog.Trace("pg prometheus metric", "CalculateAndChooseBestNode", + fmt.Sprintf("current mem idle: %d \n", memIdle), 0) + } + ioUtilization, err := getDiskIOUtilization(pgPrometheusUrl, nodeExporterSlice[i], cfg.NodeExporterName, cfg.PgDataDiskName, cfg.TimeInterval) if err != nil { - golog.Error("pg prometheus metric", "CalculateAndChooseBestNode", - fmt.Sprintf("Get current disk io utilization failed: %s \n", err.Error()), 0) + if golog.GetLevel() <= 4 { + golog.Error("pg prometheus metric", "CalculateAndChooseBestNode", + fmt.Sprintf("Get current disk io utilization failed: %s \n", err.Error()), 0) + } return -1, nil, errors.New(fmt.Sprintf("Get current disk io utilization failed: %s \n", err.Error())) } - golog.Trace("pg prometheus metric", "CalculateAndChooseBestNode", - fmt.Sprintf("current disk io utilization: %d \n", ioUtilization), 0) + if golog.GetLevel() <= 0 { + golog.Trace("pg prometheus metric", "CalculateAndChooseBestNode", + fmt.Sprintf("current disk io utilization: %d \n", ioUtilization), 0) + } score := cpuIdle*CPU_WEIGHT + memIdle*MEM_WEIGHT + ioUtilization*DISK_IO_WEIGHT + (cfg.MaxConnNum-connNum)*CONNECTION_WEIGHT nodeLoad[i] = score if score > maxScore { index = i maxScore = score } - golog.Trace("pg prometheus metric", "CalculateAndChooseBestNode", - fmt.Sprintf("current score is %d, max score is %d \n", score, maxScore), 0) + if golog.GetLevel() <= 0 { + golog.Trace("pg prometheus metric", "CalculateAndChooseBestNode", + fmt.Sprintf("current score is %d, max score is %d \n", score, maxScore), 0) + } } return index, nodeLoad, nil } @@ -128,8 +152,11 @@ func getCurrentConnections(prometheusUrl string, pgInstanceLable string, pgJobLa Param("query", "sum(pg_stat_database_numbackends{instance=\""+pgInstanceLable+"\",job=\""+pgJobLable+"\"})"). EndStruct(&metric) if errs != nil || resp.StatusCode > 300 || resp.StatusCode < 200 { - golog.Error("pg prometheus metric", "getCurrentConnections", - fmt.Sprintf("%v %v", resp, errs), 0) + if golog.GetLevel() <= 4 { + golog.Error("pg prometheus metric", "getCurrentConnections", + fmt.Sprintf("%v %v", resp, errs), 0) + } + return 0, errors.New("Failed to get current connections. ") } else { return handleResult(metric) @@ -146,8 +173,11 @@ func getCpuIdle(prometheusUrl string, nodeInstanceLable string, nodeJobLable str "\",job=\""+nodeJobLable+"\"}["+timeInterval+"]))*100"). EndStruct(&metric) if errs != nil || resp.StatusCode > 300 || resp.StatusCode < 200 { - golog.Error("pg prometheus metric", "getCpuIdle", - fmt.Sprintf("%v %v", resp, errs), 0) + if golog.GetLevel() <= 4 { + golog.Error("pg prometheus metric", "getCpuIdle", + fmt.Sprintf("%v %v", resp, errs), 0) + } + return 0, errors.New("Failed to get cpu idle. ") } else { return handleResult(metric) @@ -162,8 +192,10 @@ func getMemIdle(prometheusUrl string, nodeInstanceLable string, nodeJobLable str "\",job=\""+nodeJobLable+"\"}/node_memory_MemTotal_bytes{instance=\""+nodeInstanceLable+"\",job=\""+nodeJobLable+"\"}*100"). 
EndStruct(&metric)
     if errs != nil || resp.StatusCode > 300 || resp.StatusCode < 200 {
-        golog.Error("pg prometheus metric", "getMemIdle",
-            fmt.Sprintf("%v %v", resp, errs), 0)
+        if golog.GetLevel() <= 4 {
+            golog.Error("pg prometheus metric", "getMemIdle",
+                fmt.Sprintf("%v %v", resp, errs), 0)
+        }
         return 0, errors.New("Failed to get mem idle. ")
     } else {
         return handleResult(metric)
@@ -178,8 +210,10 @@ func getDiskIOUtilization(prometheusUrl string, nodeInstanceLable string, nodeJo
         "\",job=\""+nodeJobLable+"\",device=~\""+diskDevice+"\"}["+timeInterval+"])*100").
         EndStruct(&metric)
     if errs != nil || resp.StatusCode > 300 || resp.StatusCode < 200 {
-        golog.Error("pg prometheus metric", "getDiskIOUtilization",
-            fmt.Sprintf("%v %v", resp, errs), 0)
+        if golog.GetLevel() <= 4 {
+            golog.Error("pg prometheus metric", "getDiskIOUtilization",
+                fmt.Sprintf("%v %v", resp, errs), 0)
+        }
         return 0, errors.New("Failed to get disk io utilization idle. ")
     } else {
         return handleResult(metric)
@@ -190,8 +224,10 @@ func handleResult(metric Prometheus) (int, error) {
     defer func() {
         err := recover()
         if err != nil {
-            golog.Error("pg prometheus metric", "handleResult",
-                fmt.Sprintf("%v", err), 0)
+            if golog.GetLevel() <= 4 {
+                golog.Error("pg prometheus metric", "handleResult",
+                    fmt.Sprintf("%v", err), 0)
+            }
         }
     }()
     if len(metric.Data.Result) == 0{
diff --git a/cmd/he3proxy/main.go b/cmd/he3proxy/main.go
index 781cdd8..dee01ad 100644
--- a/cmd/he3proxy/main.go
+++ b/cmd/he3proxy/main.go
@@ -27,6 +27,7 @@ import (
     "syscall"
 
     "github.com/google/gops/agent"
+    "github.com/pkg/profile"
     "github.com/pyroscope-io/client/pyroscope"
 
     "gitee.com/he3db/he3proxy/config"
@@ -72,6 +73,10 @@ const banner string = `
 `
 
 func main() {
+    // start profiling and get a handle to stop it
+    stopper := profile.Start(profile.CPUProfile, profile.ProfilePath("."))
+    // stop profiling when main() exits
+    defer stopper.Stop()
     fmt.Print(banner)
     fmt.Printf("set runtime GOMAXPROCS: %d \n", runtime.NumCPU())
@@ -235,7 +240,9 @@ func main() {
             golog.Info("main", "main", "Got update config signal", 0)
             newCfg, err := config.ParseConfigFile(*configFile)
             if err != nil {
-                golog.Error("main", "main", fmt.Sprintf("parse config file error:%s", err.Error()), 0)
+                if golog.GetLevel() <= 4 {
+                    golog.Error("main", "main", fmt.Sprintf("parse config file error:%s", err.Error()), 0)
+                }
             } else {
                 svr.UpdateConfig(newCfg)
             }
diff --git a/config/config.go b/config/config.go
index 793d3ff..a02ded7 100644
--- a/config/config.go
+++ b/config/config.go
@@ -32,7 +32,13 @@ const (
     ConnPoolSwitch = "CONNS_POOL_SWITCH"
     DefaultHe3User = "he3proxy"
     ConnFlag       = "HE3PROXY_FLAG"
+    // if false, metrics collection is disabled; default true
     MetricsFlag = "HE3PROXY_METRICS_FLAG"
+    // if false, the client connection id is not stored, so cancel requests will not take effect; default true
+    CancelReqFlag = "HE3PROXY_CANCEL_REQUEST_FLAG"
+    TimeCostFlag = "HE3PROXY_TIME_COST_FLAG"
+    // if true, switch from the crdb parser to kingshard lex analysis
+    SimpleParseFlag = "HE3PROXY_SIMPLE_PARSE_FLAG"
 )
 
 // Structure corresponding to the entire config file
diff --git a/core/errors/errors.go b/core/errors/errors.go
index cf34aa1..bad58e9 100644
--- a/core/errors/errors.go
+++ b/core/errors/errors.go
@@ -87,10 +87,6 @@ var (
     ErrUnsupportDbType = errors.New("unsupport db type")
 )
 
-func ErrFormatWithCode(code string, str string, args ...interface{}) error {
-    return errors.New(code + fmt.Sprintf(str, args...))
-}
-
 func ErrFormat(str string, args ...interface{}) error {
     return errors.New(fmt.Sprintf(str, args...))
 }
diff --git a/go.mod b/go.mod
index
bf12cf2..47e3373 100644 --- a/go.mod +++ b/go.mod @@ -24,12 +24,16 @@ require ( github.com/cockroachdb/logtags v0.0.0-20211118104740-dabe8e521a4f // indirect github.com/cockroachdb/redact v1.1.3 // indirect github.com/cockroachdb/sentry-go v0.6.1-cockroachdb.2 // indirect + github.com/dablelv/go-huge-util v0.0.21 // indirect github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect github.com/dustin/go-humanize v1.0.0 // indirect + github.com/felixge/fgprof v0.9.3 // indirect github.com/getsentry/raven-go v0.2.0 // indirect + github.com/go-sql-driver/mysql v1.5.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.2 // indirect github.com/google/go-cmp v0.5.8 // indirect + github.com/google/pprof v0.0.0-20211214055906-6f57359322fd // indirect github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect github.com/jackc/chunkreader/v2 v2.0.1 // indirect github.com/jackc/pgconn v1.10.1 // indirect @@ -44,17 +48,19 @@ require ( github.com/mattn/go-runewidth v0.0.10 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect github.com/pkg/errors v0.9.1 // indirect + github.com/pkg/profile v1.7.0 // indirect github.com/prometheus/client_model v0.2.0 // indirect github.com/prometheus/common v0.34.0 // indirect github.com/prometheus/procfs v0.7.3 // indirect github.com/rivo/uniseg v0.2.0 // indirect github.com/sirupsen/logrus v1.8.1 // indirect + github.com/spf13/cast v1.4.1 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/valyala/bytebufferpool v1.0.0 // indirect github.com/valyala/fasttemplate v1.2.1 // indirect golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa // indirect golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2 // indirect - golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect + golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 // indirect golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a // indirect golang.org/x/text v0.3.7 // indirect google.golang.org/grpc v1.38.0 // indirect diff --git a/go.sum b/go.sum index aea714c..fcfde42 100644 --- a/go.sum +++ b/go.sum @@ -93,6 +93,8 @@ github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7 github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/dablelv/go-huge-util v0.0.21 h1:lN5AY5bPYA04XYIf/H0XCnmJqQjM/6lEncuM+5BHvfg= +github.com/dablelv/go-huge-util v0.0.21/go.mod h1:IxNJ9zomRt9d2cip5htLnK8K3qs/H0l3Y4XWQrOXhRs= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -114,6 +116,8 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7 github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/felixge/fgprof v0.9.3 h1:VvyZxILNuCiUCSXtPtYmmtGvb65nqXh2QFWc0Wpf2/g= +github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNun8eiPw= github.com/flosch/pongo2 
v0.0.0-20190707114632-bbf5a6c351f4/go.mod h1:T9YF2M40nIgbVgp3rreNmTged+9HrbNTIQf1PsaIiTA= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= @@ -143,6 +147,8 @@ github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvSc github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4= +github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs= +github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= @@ -213,6 +219,8 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20211214055906-6f57359322fd h1:1FjCyPC+syAzJ5/2S8fqdZK1R22vvA0J7JZKcuOIQ7Y= +github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= @@ -230,6 +238,7 @@ github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hydrogen18/memlistener v0.0.0-20141126152155-54553eb933fb/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/iris-contrib/blackfriday v2.0.0+incompatible/go.mod h1:UzZ2bDEoaSGPbkg6SAB4att1aAwTmVIx/5gCVqeyUdI= @@ -394,6 +403,8 @@ github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/profile v1.7.0 h1:hnbDkaNWPCLMO9wGLdBFTIZvzDrDfBM2072E1S9gJkA= +github.com/pkg/profile v1.7.0/go.mod h1:8Uer0jas47ZQMJ7VD+OHknK4YDY07LPUC6dEvqDjvNo= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= @@ -457,6 +468,8 @@ github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIK github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= +github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= @@ -634,6 +647,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -690,6 +705,7 @@ golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= diff --git a/mysql/const.go b/mysql/const.go index 023f485..5e3ab37 100644 --- a/mysql/const.go +++ b/mysql/const.go @@ -176,6 +176,7 @@ var ( TK_ID_TRANSACTION = 13 TK_ID_SHOW = 14 TK_ID_TRUNCATE = 15 + TK_ID_CREATE = 16 PARSE_TOKEN_MAP = map[string]int{ "insert": TK_ID_INSERT, @@ -193,6 +194,7 @@ var ( "transaction": TK_ID_TRANSACTION, "show": TK_ID_SHOW, "truncate": TK_ID_TRUNCATE, + "create": TK_ID_CREATE, } // '*' COMMENT_PREFIX uint8 = 42 diff --git a/proxy/server/conn_pgsql.go b/proxy/server/conn_pgsql.go index 5d82ba2..2719bf9 100644 --- a/proxy/server/conn_pgsql.go +++ b/proxy/server/conn_pgsql.go @@ -32,6 +32,7 @@ import ( "time" "unsafe" + timecost "github.com/dablelv/go-huge-util" "github.com/jackc/pgproto3/v2" 
"github.com/jackc/pgx/v4" @@ -125,7 +126,7 @@ func (c *ClientConn) RunPg(ctx context.Context) { }() // flag for just use master node, just use for some special cases // treat it simple and crude, set in transaction - if strings.ToUpper(os.Getenv(config.SingleSession)) == "ON" { + if os.Getenv(config.SingleSession) == "true" { c.status &= ^mysql.SERVER_STATUS_AUTOCOMMIT c.status |= mysql.SERVER_STATUS_IN_TRANS c.alwaysCurNode = true @@ -147,9 +148,11 @@ func (c *ClientConn) RunPg(ctx context.Context) { return } c.configVer = c.proxy.configVer - golog.Debug(moduleName, "RunPg", - fmt.Sprintf("config reload ok, ver: %d", c.configVer), c.connectionId, - ) + if golog.GetLevel() <= 1 { + golog.Debug(moduleName, "RunPg", + fmt.Sprintf("config reload ok, ver: %d", c.configVer), c.connectionId) + } + } // handle receive msg @@ -191,7 +194,7 @@ func (cc *ClientConn) dispatchPg(ctx context.Context, header []byte, data []byte if err := simpleQuery.Decode(data); err != nil { return err } - err = cc.handleQueryPg(ctx, simpleQuery.String, append(header, data...)) + err = cc.handleQueryPg(ctx, simpleQuery.String, BytesCombine(header, data)) return err /* extend query protocol, msg send in sequence @@ -238,8 +241,10 @@ func (cc *ClientConn) dispatchPg(ctx context.Context, header []byte, data []byte cc.parseFlag = true cc.backendConn, err = cc.preHandlePg(sql, ctx) if err == nil { - golog.Debug(moduleName, "parse", - fmt.Sprintf("exec sql [%s] by node [%s]", sql, cc.backendConn.GetAddr()), cc.connectionId, "dbname", cc.db) + if golog.GetLevel() <= 1 { + golog.Debug(moduleName, "parse", + fmt.Sprintf("exec sql [%s] by node [%s]", sql, cc.backendConn.GetAddr()), cc.connectionId, "dbname", cc.db) + } } if cc != nil && cc.backendConn != nil { clientConnMap.Store(cc.connectionId, cc) @@ -254,16 +259,16 @@ func (cc *ClientConn) dispatchPg(ctx context.Context, header []byte, data []byte clientConnMap.Store(cc.connectionId, cc) // packaging send msg - cc.dataSend = append(cc.dataSend, header...) - cc.dataSend = append(cc.dataSend, data...) + cc.dataSend = BytesCombine(cc.dataSend, header) + cc.dataSend = BytesCombine(cc.dataSend, data) return nil case 'B': /* bind */ - cc.dataSend = append(cc.dataSend, header...) - cc.dataSend = append(cc.dataSend, data...) + cc.dataSend = BytesCombine(cc.dataSend, header) + cc.dataSend = BytesCombine(cc.dataSend, data) return nil case 'E': /* execute */ - cc.dataSend = append(cc.dataSend, header...) - cc.dataSend = append(cc.dataSend, data...) + cc.dataSend = BytesCombine(cc.dataSend, header) + cc.dataSend = BytesCombine(cc.dataSend, data) return nil case 'F': /* fastpath function call */ case 'C': /* close */ @@ -274,32 +279,32 @@ func (cc *ClientConn) dispatchPg(ctx context.Context, header []byte, data []byte err = cc.handleStmtClosePg(ctx, c) return err case 'D': /* describe */ - cc.dataSend = append(cc.dataSend, header...) - cc.dataSend = append(cc.dataSend, data...) + cc.dataSend = BytesCombine(cc.dataSend, header) + cc.dataSend = BytesCombine(cc.dataSend, data) return nil case 'H': /* flush */ // return cc.flush(ctx) case 'S': /* sync */ - cc.dataSend = append(cc.dataSend, header...) - cc.dataSend = append(cc.dataSend, data...) + cc.dataSend = BytesCombine(cc.dataSend, header) + cc.dataSend = BytesCombine(cc.dataSend, data) err = cc.handleStmtSyncPg(ctx) cc.dataSend = make([]byte, 0) return err case 'X': /*Client Terminate*/ return io.EOF case 'd': /* copy data */ - cc.dataSend = append(cc.dataSend, header...) - cc.dataSend = append(cc.dataSend, data...) 
+ cc.dataSend = BytesCombine(cc.dataSend, header) + cc.dataSend = BytesCombine(cc.dataSend, data) return nil case 'c': /* copy done */ - cc.dataSend = append(cc.dataSend, header...) - cc.dataSend = append(cc.dataSend, data...) + cc.dataSend = BytesCombine(cc.dataSend, header) + cc.dataSend = BytesCombine(cc.dataSend, data) err = cc.handleCopy(ctx) cc.dataSend = make([]byte, 0) return err case 'f': /* copy fail */ - cc.dataSend = append(cc.dataSend, header...) - cc.dataSend = append(cc.dataSend, data...) + cc.dataSend = BytesCombine(cc.dataSend, header) + cc.dataSend = BytesCombine(cc.dataSend, data) err = cc.handleCopy(ctx) cc.dataSend = make([]byte, 0) return err @@ -311,6 +316,14 @@ func (cc *ClientConn) dispatchPg(ctx context.Context, header []byte, data []byte } func (cc *ClientConn) handleParsePrepare(ctx context.Context) error { + if os.Getenv(config.TimeCostFlag) == "true" { + c := timecost.TimeCost() + defer func() { + if golog.GetLevel() <= 3 { + golog.Warn("TIMECOST", "timecost", fmt.Sprintf("handleParsePrepare time cost is %v", c()), cc.connectionId) + } + }() + } var parseData []byte cc.Parse.Range(func(key, value interface{}) bool { parse := (value).(pgproto3.Parse) @@ -320,26 +333,44 @@ func (cc *ClientConn) handleParsePrepare(ctx context.Context) error { if parseData == nil || len(parseData) == 0 { return nil } - golog.Debug(moduleName, "handleParsePrepare", fmt.Sprintf("write cached parse data is: %s", string(parseData)), cc.connectionId) + if golog.GetLevel() <= 1 { + golog.Debug(moduleName, "handleParsePrepare", fmt.Sprintf("write cached parse data is: %s", string(parseData)), cc.connectionId) + } + err := cc.backendConn.Conn.WritePgPacket(parseData) if err != nil { - golog.Error(moduleName, "handleParsePrepare", fmt.Sprintf("write parse to connection err: %s", err.Error()), cc.connectionId) + if golog.GetLevel() <= 4 { + golog.Error(moduleName, "handleParsePrepare", fmt.Sprintf("write parse to connection err: %s", err.Error()), cc.connectionId) + } } return err } // handle simple query protocol func (cc *ClientConn) handleQueryPg(ctx context.Context, sql string, data []byte) error { + + if os.Getenv(config.TimeCostFlag) == "true" { + c := timecost.TimeCost() + defer func() { + if golog.GetLevel() <= 3 { + golog.Warn("TIMECOST", "timecost", fmt.Sprintf("handleQueryPg time cost is %v", c()), cc.connectionId) + } + }() + } var err error if cc.backendConn == nil || cc.backendConn.Conn == nil { cc.backendConn, err = cc.preHandlePg(sql, ctx) if err != nil { - golog.Error(moduleName, "handleQueryPg", err.Error(), 0, "sql", sql) + if golog.GetLevel() <= 4 { + golog.Error(moduleName, "handleQueryPg", err.Error(), 0, "sql", sql) + } return err } if cc != nil && cc.backendConn != nil { - clientConnMap.Store(cc.connectionId, cc) + if os.Getenv(config.CancelReqFlag) != "false" { + clientConnMap.Store(cc.connectionId, cc) + } cc.handleParsePrepare(ctx) } } else { @@ -353,19 +384,21 @@ func (cc *ClientConn) handleQueryPg(ctx context.Context, sql string, data []byte return errors.ErrConnIsNil } defer cc.closeConn(cc.backendConn, false) - golog.Debug(moduleName, "handleQueryPg", - fmt.Sprintf("exec sql [%s] by node [%s]", sql, cc.backendConn.GetAddr()), cc.connectionId, "dbname", cc.db) + if golog.GetLevel() <= 1 { + golog.Debug(moduleName, "handleQueryPg", + fmt.Sprintf("exec sql [%s] by node [%s]", sql, cc.backendConn.GetAddr()), cc.connectionId, "dbname", cc.db) + } // deal with duplicate "begin", will return 'WARNING: there is already a transaction in progress'. 
if cc.beginFlag == BEGIN_PRESTART_COMMIT && "BEGIN" == strings.ToUpper(strings.ReplaceAll(sql, ";", "")) { errRes := pgproto3.ErrorResponse{ - Severity: "WARNING", + Severity: "WARNING", SeverityUnlocalized: "WARNING", - Code: "25001", - Message: "there is already a transaction in progress", - File: "xact.c", - Line: 3689, - Routine: "BeginTransactionBlock", + Code: "25001", + Message: "there is already a transaction in progress", + File: "xact.c", + Line: 3689, + Routine: "BeginTransactionBlock", } var nRes pgproto3.NoticeResponse nRes = pgproto3.NoticeResponse(errRes) @@ -391,14 +424,19 @@ func (cc *ClientConn) handleQueryPg(ctx context.Context, sql string, data []byte sqlStr := "BEGIN;" err = cc.backendConn.Conn.WritePgPacket((&pgproto3.Query{String: sqlStr}).Encode(nil)) if err != nil { - golog.Error(moduleName, "handleQueryPg", fmt.Sprintf("write msg err: %s", err.Error()), cc.connectionId) + if golog.GetLevel() <= 4 { + golog.Error(moduleName, "handleQueryPg", fmt.Sprintf("write msg err: %s", err.Error()), cc.connectionId) + } return err } // consume msg from backend, but not return to client + header := make([]byte, 5) for { - d, e := cc.backendConn.Conn.ReadPgPacket() + d, e := cc.backendConn.Conn.ReadPgPacket(header) if e != nil { - golog.Error(moduleName, "receiveBackendMsg", fmt.Sprintf("read packet from backend err: %s", e.Error()), cc.connectionId) + if golog.GetLevel() <= 4 { + golog.Error(moduleName, "receiveBackendMsg", fmt.Sprintf("read packet from backend err: %s", e.Error()), cc.connectionId) + } return e } if d[0] == 'Z' { @@ -410,7 +448,9 @@ func (cc *ClientConn) handleQueryPg(ctx context.Context, sql string, data []byte // exec current statement err = cc.backendConn.Conn.WritePgPacket(data) if err != nil { - golog.Error(moduleName, "handleQueryPg", fmt.Sprintf("write msg err: %s", err.Error()), cc.connectionId) + if golog.GetLevel() <= 4 { + golog.Error(moduleName, "handleQueryPg", fmt.Sprintf("write msg err: %s", err.Error()), cc.connectionId) + } return err } // reset flag @@ -419,14 +459,18 @@ func (cc *ClientConn) handleQueryPg(ctx context.Context, sql string, data []byte } else { err = cc.backendConn.Conn.WritePgPacket(data) if err != nil { - golog.Error(moduleName, "handleQueryPg", fmt.Sprintf("write msg err: %s", err.Error()), cc.connectionId) + if golog.GetLevel() <= 4 { + golog.Error(moduleName, "handleQueryPg", fmt.Sprintf("write msg err: %s", err.Error()), cc.connectionId) + } return err } } err = cc.receiveBackendMsg(ctx) if err != nil { - golog.Error(moduleName, "handleQueryPg", fmt.Sprintf("receiveBackend msg err: %s", err.Error()), cc.connectionId) + if golog.GetLevel() <= 4 { + golog.Error(moduleName, "handleQueryPg", fmt.Sprintf("receiveBackend msg err: %s", err.Error()), cc.connectionId) + } return err } @@ -443,23 +487,145 @@ func stringTobyteSlice(s string) []byte { } +func slicePgMsg(msg []byte) (res [][]byte) { + res = make([][]byte, 0) + if len(msg) == 0 { + return res + } + for len(msg) > 0 { + msgLen := binary.BigEndian.Uint32(msg[1:5]) + res = append(res, msg[:1+msgLen]) + msg = msg[1+msgLen:] + } + return res +} + +// receive server connection msg, add deal with it + +//func (cc *ClientConn) receiveBackendMsg(ctx context.Context) error { +// msg, err := cc.backendConn.Conn.ReadPgAllPacket() +// if err != nil { +// golog.Error(moduleName, "receiveBackendMsg", fmt.Sprintf("read packet from backend err: %s", err.Error()), cc.connectionId) +// return err +// } +// golog.Trace(moduleName, "receiveBackendMsg", fmt.Sprintf("recv packet from backend 
msg type: %s", string(msg)), cc.connectionId) +// dataList := slicePgMsg(msg) +//readloop: +// for _, data := range dataList { +// // deal with copy msg +// if data[0] == 'G' || data[0] == 'W' { +// // in transaction +// cc.status &= ^mysql.SERVER_STATUS_AUTOCOMMIT +// cc.status |= mysql.SERVER_STATUS_IN_TRANS +// cc.dataRecv = append(cc.dataRecv, data...) +// cc.WriteData(cc.dataRecv) +// cc.dataRecv = make([]byte, 0) +// break readloop +// } +// if data[0] == 'H' { +// cc.status &= ^mysql.SERVER_STATUS_AUTOCOMMIT +// cc.status |= mysql.SERVER_STATUS_IN_TRANS +// cc.dataRecv = append(cc.dataRecv, data...) +// cc.WriteData(cc.dataRecv) +// cc.dataRecv = make([]byte, 0) +// continue +// } +// // add new protocol 'L' for read consistency +// if data[0] == 'L' { +// lsn := pgproto3.LsnResponse{} +// lsn.Decode(data[5:]) +// addr := cc.backendConn.ConnPg.PgConn().Conn().RemoteAddr().String() +// golog.Debug("pg conn", "receiveBackendMsg", fmt.Sprintf("addr: %s, lsn: %d", addr, lsn.LSN), cc.connectionId) +// // set LSN to node +// if addr != "" { +// cc.nodes["node1"].NodeLsn.Store(strings.Split(addr, ":")[0], lsn.LSN) +// } +// // set LSN to db_table +// if cc.table != "" && cc.db != "" { +// cc.nodes["node1"].NodeLsn.Store(cc.db+"_"+cc.table, lsn.LSN) +// } +// continue +// } +// +// // deal with msg for readForQuery. return msg +// if data[0] == 'Z' { +// q := pgproto3.ReadyForQuery{} +// q.Decode(data[5:]) +// // deal with 'begin-commit' statement, if begin-select will return 'T' for front, +// // means in transaction, actually backend not in transaction. Do sql with load balance +// if cc.beginFlag == BEGIN_PRESTART_COMMIT { +// data = (&pgproto3.ReadyForQuery{TxStatus: 'T'}).Encode(nil) +// } else if cc.beginFlag == BEGIN_RELSTART_BEGIN { +// if q.TxStatus == 'I' && !cc.alwaysCurNode { +// cc.status = mysql.SERVER_STATUS_AUTOCOMMIT +// } else { +// cc.status &= ^mysql.SERVER_STATUS_AUTOCOMMIT +// cc.status |= mysql.SERVER_STATUS_IN_TRANS +// } +// cc.beginFlag = BEGIN_COMMIT +// } else { +// if q.TxStatus == 'T' && !cc.isInTransaction() { +// cc.status &= ^mysql.SERVER_STATUS_AUTOCOMMIT +// cc.status |= mysql.SERVER_STATUS_IN_TRANS +// } else if q.TxStatus == 'I' && !cc.alwaysCurNode { +// //cc.status |= mysql.SERVER_STATUS_AUTOCOMMIT +// //cc.status &= ^mysql.SERVER_STATUS_IN_TRANS +// if cc.isInTransaction() { +// cc.status = mysql.SERVER_STATUS_AUTOCOMMIT +// } +// if cc.beginFlag != BEGIN_UNSTART { +// cc.beginFlag = BEGIN_UNSTART +// } +// } +// } +// +// cc.dataRecv = append(cc.dataRecv, data...) +// err = cc.WriteData(cc.dataRecv) +// if err != nil { +// golog.Error(moduleName, "receiveBackendMsg", fmt.Sprintf("write data to backend err: %v", err), cc.connectionId) +// } +// cc.dataRecv = make([]byte, 0) +// break readloop +// } +// if data[0] == 'E' { +// golog.Error(moduleName, "receiveBackendMsg", fmt.Sprintf("read err packet from backend: %s", string(data)), cc.connectionId) +// } +// cc.dataRecv = append(cc.dataRecv, data...) 
+// } +// return nil +//} + // receive server connection msg, add deal with it func (cc *ClientConn) receiveBackendMsg(ctx context.Context) error { + if os.Getenv(config.TimeCostFlag) == "true" { + c := timecost.TimeCost() + defer func() { + if golog.GetLevel() <= 3 { + golog.Warn("TIMECOST", "timecost", fmt.Sprintf("receiveBackendMsg time cost is %v", c()), cc.connectionId) + } + }() + } + header := make([]byte, 5) + readloop: for { - data, err := cc.backendConn.Conn.ReadPgPacket() + data, err := cc.backendConn.Conn.ReadPgPacket(header) if err != nil { - golog.Error(moduleName, "receiveBackendMsg", fmt.Sprintf("read packet from backend err: %s", err.Error()), cc.connectionId) + if golog.GetLevel() <= 4 { + golog.Error(moduleName, "receiveBackendMsg", fmt.Sprintf("read packet from backend err: %s", err.Error()), cc.connectionId) + } return err } - golog.Trace(moduleName, "receiveBackendMsg", fmt.Sprintf("recv packet from backend msg type: %s", string(data[0])), cc.connectionId) + if golog.GetLevel() <= 0 { + golog.Trace(moduleName, "receiveBackendMsg", fmt.Sprintf("recv packet from backend msg type: %s", string(data[0])), cc.connectionId) + } // deal with copy msg if data[0] == 'G' || data[0] == 'W' { // in transaction cc.status &= ^mysql.SERVER_STATUS_AUTOCOMMIT cc.status |= mysql.SERVER_STATUS_IN_TRANS - cc.dataRecv = append(cc.dataRecv, data...) + cc.dataRecv = BytesCombine(cc.dataRecv, data) cc.WriteData(cc.dataRecv) cc.dataRecv = make([]byte, 0) break readloop @@ -467,7 +633,7 @@ readloop: if data[0] == 'H' { cc.status &= ^mysql.SERVER_STATUS_AUTOCOMMIT cc.status |= mysql.SERVER_STATUS_IN_TRANS - cc.dataRecv = append(cc.dataRecv, data...) + cc.dataRecv = BytesCombine(cc.dataRecv, data) cc.WriteData(cc.dataRecv) cc.dataRecv = make([]byte, 0) continue @@ -477,7 +643,9 @@ readloop: lsn := pgproto3.LsnResponse{} lsn.Decode(data[5:]) addr := cc.backendConn.ConnPg.PgConn().Conn().RemoteAddr().String() - golog.Debug("pg conn", "receiveBackendMsg", fmt.Sprintf("addr: %s, lsn: %d", addr, lsn.LSN), cc.connectionId) + if golog.GetLevel() <= 1 { + golog.Debug("pg conn", "receiveBackendMsg", fmt.Sprintf("addr: %s, lsn: %d", addr, lsn.LSN), cc.connectionId) + } // set LSN to node if addr != "" { cc.nodes["node1"].NodeLsn.Store(strings.Split(addr, ":")[0], lsn.LSN) @@ -521,19 +689,24 @@ readloop: } } - cc.dataRecv = append(cc.dataRecv, data...) + cc.dataRecv = BytesCombine(cc.dataRecv, data) err = cc.WriteData(cc.dataRecv) if err != nil { - golog.Error(moduleName, "receiveBackendMsg", fmt.Sprintf("write data to backend err: %v", err), cc.connectionId) + if golog.GetLevel() <= 4 { + golog.Error(moduleName, "receiveBackendMsg", fmt.Sprintf("write data to backend err: %v", err), cc.connectionId) + } + } cc.dataRecv = make([]byte, 0) break readloop } if data[0] == 'E' { - golog.Error(moduleName, "receiveBackendMsg", fmt.Sprintf("read err packet from backend: %s", string(data)), cc.connectionId) + if golog.GetLevel() <= 4 { + golog.Error(moduleName, "receiveBackendMsg", fmt.Sprintf("read err packet from backend: %s", string(data)), cc.connectionId) + } } - cc.dataRecv = append(cc.dataRecv, data...) + cc.dataRecv = BytesCombine(cc.dataRecv, data) // TODO At present, all data are returned. // In the future, we need to consider the situation of multiple data. 
// We need to set a threshold and return in batches @@ -768,7 +941,7 @@ func (cc *ClientConn) handleStartupMessage(ctx context.Context, startupMessage * // connection env 'HE3PROXY' need support by db engine // if you use original postgres, please close this flag or will encounter an error like: // FATAL: unrecognized configuration parameter "he3proxy" (SQLSTATE 42704) - if strings.ToUpper(os.Getenv(config.ConnFlag)) != "FALSE" { + if os.Getenv(config.ConnFlag) != "false" { os.Setenv("HE3PROXY", "true") } if cc.db == "" { @@ -825,9 +998,10 @@ func (cc *ClientConn) handleStartupMessage(ctx context.Context, startupMessage * if err := cc.writeReadyForQuery(ctx, 'I'); err != nil { return err } - - golog.Info(moduleName, "handleStartupMessage", - fmt.Sprintf("%s connection succeeded", cc.c.RemoteAddr().String()), cc.connectionId) + if golog.GetLevel() <= 2 { + golog.Info(moduleName, "handleStartupMessage", + fmt.Sprintf("%s connection succeeded", cc.c.RemoteAddr().String()), cc.connectionId) + } cc.pkg.Sequence = 0 @@ -1039,7 +1213,9 @@ func (cc *ClientConn) writeSSLRequest(ctx context.Context, pgRequestSSL byte) er func (cc *ClientConn) WriteData(data []byte) error { if n, err := cc.pkg.Wb.Write(data); err != nil { - golog.Error(moduleName, "WriteData", fmt.Sprintf("write data to backend err: %v", err), cc.connectionId) + if golog.GetLevel() <= 4 { + golog.Error(moduleName, "WriteData", fmt.Sprintf("write data to backend err: %v", err), cc.connectionId) + } return mysql.ErrBadConn } else if n != len(data) { return mysql.ErrBadConn diff --git a/proxy/server/conn_preshard.go b/proxy/server/conn_preshard.go index 0c223b9..055b081 100644 --- a/proxy/server/conn_preshard.go +++ b/proxy/server/conn_preshard.go @@ -23,6 +23,7 @@ import ( "github.com/auxten/postgresql-parser/pkg/sql/parser" "github.com/auxten/postgresql-parser/pkg/sql/sem/tree" "github.com/auxten/postgresql-parser/pkg/walk" + timecost "github.com/dablelv/go-huge-util" "gitee.com/he3db/he3proxy/backend" "gitee.com/he3db/he3proxy/config" @@ -114,8 +115,11 @@ func (c *ClientConn) preHandleShard(sql string) (bool, error) { } if len(rs) == 0 { - msg := fmt.Sprintf("result is empty") - golog.Error("conn_preshard", "handleUnsupport", msg, 0, "sql", sql) + msg := "result is empty" + if golog.GetLevel() <= 4 { + golog.Error("conn_preshard", "handleUnsupport", msg, 0, "sql", sql) + } + return false, mysql.NewError(mysql.ER_UNKNOWN_ERROR, msg) } @@ -137,6 +141,15 @@ func (c *ClientConn) preHandleShard(sql string) (bool, error) { //preprocessing sql before parse sql func (c *ClientConn) preHandlePg(sql string, ctx context.Context) (*backend.BackendConn, error) { + + if os.Getenv(config.TimeCostFlag) == "true" { + tc := timecost.TimeCost() + defer func() { + if golog.GetLevel() <= 3 { + golog.Warn("TIMECOST", "timecost", fmt.Sprintf("preHandlePg time cost is %v", tc()),c.connectionId) + } + }() + } var err error var executeDB *ExecuteDB @@ -228,8 +241,19 @@ func (c *ClientConn) GetTransExecDB(tokens []string, sql string) (*ExecuteDB, er //if sql need shard return nil, else return the unshard db func (c *ClientConn) GetExecDB(tokens []string, sql string) (*ExecuteDB, error) { - golog.Debug("conn_preshard", "GetExecDB", - fmt.Sprintf("tokens: [%v], sql: [%s]", tokens, sql), c.connectionId) + + if os.Getenv(config.TimeCostFlag) == "true" { + tc := timecost.TimeCost() + defer func() { + if golog.GetLevel() <= 3 { + golog.Warn("TIMECOST", "timecost", fmt.Sprintf("GetExecDB time cost is %v", tc()), c.connectionId) + } + }() + } + if golog.GetLevel() <= 
1 { + golog.Debug("conn_preshard", "GetExecDB", + fmt.Sprintf("tokens: [%v], sql: [%s]", tokens, sql), c.connectionId) + } tokensLen := len(tokens) // The logic of PG node selection is processed. // Since there is no need to divide the database and table, the original kingshard logic is removed. @@ -269,6 +293,59 @@ func (c *ClientConn) GetExecDB(tokens []string, sql string) (*ExecuteDB, error) break } } + // simple parse, just use lex analysis and lb for cache mode will change to weight + if os.Getenv(config.SimpleParseFlag) == "true" { + f = false + tokenId, ok := mysql.PARSE_TOKEN_MAP[strings.ToLower(tokens[0])] + if ok == true { + switch tokenId { + case mysql.TK_ID_SELECT, mysql.TK_ID_SHOW: + if strings.Contains(sql, "for update") || + strings.Contains(sql, "FOR UPDATE") || + strings.Contains(sql, "for share") || + strings.Contains(sql, "FOR SHARE") || + strings.Contains(sql, "setval") || + strings.Contains(sql, "SETVAL") || + strings.Contains(sql, "nextval") || + strings.Contains(sql, "NEXTVAL") { + break + } + return c.getSelectExecDB(sql, tokens, tokensLen) + case mysql.TK_ID_BEGIN: + if c.parseFlag { + c.status &= ^mysql.SERVER_STATUS_AUTOCOMMIT + c.status |= mysql.SERVER_STATUS_IN_TRANS + break + } + // replica node also can exec begin + if c.beginFlag == BEGIN_UNSTART { + c.beginFlag = BEGIN_PRESTART + } + return c.getSelectExecDB(sql, tokens, tokensLen) + case mysql.TK_ID_COMMIT: + if c.isInTransaction() { + break + } else { + if c.beginFlag == BEGIN_PRESTART_COMMIT { + c.beginFlag = BEGIN_RELSTART + } + return c.getSelectExecDB(sql, tokens, tokensLen) + } + case mysql.TK_ID_CREATE: + if strings.Contains(strings.ToUpper(sql), "CREATE TEMPORARY TABLE") { + c.status &= ^mysql.SERVER_STATUS_AUTOCOMMIT + c.status |= mysql.SERVER_STATUS_IN_TRANS + } + break + case mysql.TK_ID_SET: + c.status &= ^mysql.SERVER_STATUS_AUTOCOMMIT + c.status |= mysql.SERVER_STATUS_IN_TRANS + c.alwaysCurNode = true + break + } + } + } + // parse by crdb if f { stmts, err := parser.Parse(sql) tb_names := make([]string, 0) @@ -284,8 +361,10 @@ func (c *ClientConn) GetExecDB(tokens []string, sql string) (*ExecuteDB, error) } _, _ = w.Walk(stmts, nil) if err != nil { - golog.Error("conn_preshard", "GetExecDB", - fmt.Sprintf("Can't parse sql: [%s], err: [%s]", sql, err.Error()), c.connectionId) + if golog.GetLevel() <= 4 { + golog.Error("conn_preshard", "GetExecDB", + fmt.Sprintf("Can't parse sql: [%s], err: [%s]", sql, err.Error()), c.connectionId) + } } else { for _, stmt := range stmts { switch n := stmt.AST.(type) { diff --git a/proxy/server/server.go b/proxy/server/server.go index 7bdae21..d5efe21 100644 --- a/proxy/server/server.go +++ b/proxy/server/server.go @@ -334,6 +334,7 @@ func (s *Server) newClientConn(co net.Conn) *ClientConn { // meaning that data is sent as soon as possible after a Write. //I set this option false. 
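
The simple-parse branch added to GetExecDB above routes statements with a cheap first-keyword check plus a few substring tests (FOR UPDATE, FOR SHARE, nextval, setval) and only falls back to the crdb AST parser when that is not enough. A rough, self-contained sketch of first-token read/write routing follows; the isReadOnly helper and its rules are illustrative only, not the proxy's exact token table:

package main

import (
	"fmt"
	"strings"
)

// isReadOnly reports whether a statement may go to a replica, using only the
// first keyword and a couple of substring checks; anything ambiguous is
// treated as a write so it stays on the primary node.
func isReadOnly(sql string) bool {
	fields := strings.Fields(sql)
	if len(fields) == 0 {
		return false
	}
	switch strings.ToLower(fields[0]) {
	case "select", "show":
		upper := strings.ToUpper(sql)
		if strings.Contains(upper, "FOR UPDATE") ||
			strings.Contains(upper, "FOR SHARE") ||
			strings.Contains(upper, "NEXTVAL") ||
			strings.Contains(upper, "SETVAL") {
			return false
		}
		return true
	default:
		return false
	}
}

func main() {
	fmt.Println(isReadOnly("SELECT * FROM t WHERE id = 1")) // true
	fmt.Println(isReadOnly("select * from t for update"))   // false
	fmt.Println(isReadOnly("UPDATE t SET a = 1"))           // false
}

Skipping the AST trades precision for speed, which is why the lexical path above still bails out to the full parser for anything it cannot classify safely.
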
tcpConn.SetNoDelay(false) + //tcpConn.SetKeepAlive(true) c.c = tcpConn func() { @@ -418,7 +419,9 @@ func (s *Server) onConn(c net.Conn, dbType string) { ctx := context.Background() if err := conn.handshake(ctx); err != nil { - golog.Error("server", "onConn", err.Error(), 0) + if golog.GetLevel() <= 4 { + golog.Error("server", "onConn", err.Error(), 0) + } conn.writePgErr(ctx, "08006", err.Error()) conn.Close() return @@ -433,14 +436,18 @@ func (s *Server) onConn(c net.Conn, dbType string) { // init all db-node connection pool masterNode := s.nodes["node1"].Master if err := masterNode.InitConnPoolPg(conn.db, conn.user); err != nil { - golog.Error("server", "onConn", fmt.Sprintf("masterNode %s InitConnPoolPg err: %s", masterNode.Addr(), err.Error()), 0) + if golog.GetLevel() <= 4 { + golog.Error("server", "onConn", fmt.Sprintf("masterNode %s InitConnPoolPg err: %s", masterNode.Addr(), err.Error()), 0) + } conn.writePgErr(ctx, "53300", err.Error()) conn.Close() return } for _, db := range s.nodes["node1"].Slave { if err := db.InitConnPoolPg(conn.db, conn.user); err != nil { - golog.Error("server", "onConn", fmt.Sprintf("Slave %s InitConnPoolPg err: %s", db.Addr(), err.Error()), 0) + if golog.GetLevel() <= 4 { + golog.Error("server", "onConn", fmt.Sprintf("Slave %s InitConnPoolPg err: %s", db.Addr(), err.Error()), 0) + } conn.writePgErr(ctx, "53300", err.Error()) conn.Close() return @@ -850,7 +857,9 @@ func (s *Server) UpdateConfig(newCfg *config.Config) { for user := range newUserList { if _, exist := newSchemas[user]; !exist { - golog.Error("Server", "UpdateConfig", fmt.Sprintf("user [%s] must have a schema", user), 0) + if golog.GetLevel() <= 4 { + golog.Error("Server", "UpdateConfig", fmt.Sprintf("user [%s] must have a schema", user), 0) + } return } } diff --git a/proxy/server/util.go b/proxy/server/util.go index d4eebbf..2dd62c7 100644 --- a/proxy/server/util.go +++ b/proxy/server/util.go @@ -66,3 +66,13 @@ func (t *IPInfo) Match(ip net.IP) bool { return t.ip.Equal(ip) } } + +func BytesCombine(pBytes [] byte, data []byte) []byte { + return append(pBytes, data...) + //return bytes.Join(pBytes, []byte{}) + //src := make([]byte,0) + //for _, pByte := range pBytes { + // src = append(src,pByte...) 
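
A large share of the hunks in this series wrap golog calls in a golog.GetLevel() guard so the fmt.Sprintf arguments are only built when the message will actually be emitted. A minimal sketch of that guard, assuming an atomic level value ordered like the gated calls above (trace=0 ... error=4); the level constants and the debugf helper are stand-ins, not the golog API:

package main

import (
	"log"
	"sync/atomic"
)

// Illustrative level values, ordered trace < debug < info < warn < error.
const (
	levelTrace int32 = iota
	levelDebug
	levelInfo
	levelWarn
	levelError
)

var currentLevel int32 = levelWarn // assume this is the configured level

func getLevel() int32 { return atomic.LoadInt32(&currentLevel) }

// debugf formats and logs only when the debug level is enabled, so the
// formatting work is skipped entirely on the hot path otherwise.
func debugf(format string, args ...interface{}) {
	if getLevel() <= levelDebug {
		log.Printf("[debug] "+format, args...)
	}
}

func main() {
	debugf("exec sql [%s] by node [%s]", "select 1", "127.0.0.1:5432") // suppressed at warn level
	atomic.StoreInt32(&currentLevel, levelDebug)
	debugf("exec sql [%s] by node [%s]", "select 1", "127.0.0.1:5432") // now printed
}

The guard pays off most when building the message (formatting SQL text, addresses, LSNs) is itself the expensive part.
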
+ //} + //return src +} \ No newline at end of file -- Gitee From 3c0812fb4d20e6e3419f27793dc75bd1efff2212 Mon Sep 17 00:00:00 2001 From: wangyao Date: Thu, 10 Nov 2022 17:10:41 +0800 Subject: [PATCH 3/7] improve: performance reduce makeslice --- backend/backend_conn.go | 23 +++++++--- proxy/server/conn.go | 1 + proxy/server/conn_pgsql.go | 94 +++++++++++++++++--------------------- proxy/server/server.go | 5 +- 4 files changed, 62 insertions(+), 61 deletions(-) diff --git a/backend/backend_conn.go b/backend/backend_conn.go index f34e2f8..bfc19e3 100644 --- a/backend/backend_conn.go +++ b/backend/backend_conn.go @@ -61,6 +61,8 @@ type Conn struct { // pg param ConnPg pgx.Conn + + recvData []byte } func (c *Conn) Connect(addr string, user string, password string, db string) error { @@ -166,6 +168,7 @@ func (c *Conn) ReConnectPg() error { c.pkg = mysql.NewPacketIO(c.ConnPg.PgConn().Conn().(*net.TCPConn)) + c.recvData = make([]byte, 0, 16192) //if err := c.ConnPg.Ping(context.Background()); err != nil { // golog.Error("backend conn", "ReConnectPg", "pgx.Ping", 0, "urlExample", urlExample, "error", err.Error()) // c.ConnPg.Close(context.Background()) @@ -792,19 +795,25 @@ func (c *Conn) WritePgPacket(data []byte) error { return err } -func (c *Conn) ReadPgPacket(header []byte) ([]byte, error) { - if _, err := io.ReadFull(c.pkg.Rb, header); err != nil { +func (c *Conn) ReadPgPacket(data []byte) ([]byte, error) { + // read header + if _, err := io.ReadFull(c.pkg.Rb, data); err != nil { return nil, err } - msgLen := binary.BigEndian.Uint32(header[1:]) - 4 + msgLen := int(binary.BigEndian.Uint32(data[1:]) - 4) - // Get the specific information of the request - msg := make([]byte, msgLen) - if _, err := io.ReadFull(c.pkg.Rb, msg); err != nil { + //build receive byte + if msgLen > cap(c.recvData){ + c.recvData = make([]byte, msgLen, msgLen) + }else { + c.recvData = c.recvData[:msgLen] + } + if _, err := io.ReadAtLeast(c.pkg.Rb, c.recvData, msgLen); err != nil { return nil, err } - return append(header, msg...), nil + data = append(data, c.recvData...) 
+ return data, nil } // pg msg streaming can not through this way to get all msg diff --git a/proxy/server/conn.go b/proxy/server/conn.go index 406e2c9..3a1d371 100644 --- a/proxy/server/conn.go +++ b/proxy/server/conn.go @@ -86,6 +86,7 @@ type ClientConn struct { // save receive data from backend, will send to client dataRecv []byte + dataHeader []byte // flag for use extended query protocol parseFlag bool diff --git a/proxy/server/conn_pgsql.go b/proxy/server/conn_pgsql.go index 2719bf9..63c403c 100644 --- a/proxy/server/conn_pgsql.go +++ b/proxy/server/conn_pgsql.go @@ -262,13 +262,14 @@ func (cc *ClientConn) dispatchPg(ctx context.Context, header []byte, data []byte cc.dataSend = BytesCombine(cc.dataSend, header) cc.dataSend = BytesCombine(cc.dataSend, data) return nil - case 'B': /* bind */ - cc.dataSend = BytesCombine(cc.dataSend, header) - cc.dataSend = BytesCombine(cc.dataSend, data) - return nil - case 'E': /* execute */ + case 'B', 'D', 'E', 'd': /* bind */ /* describe */ /* execute */ /* copy data */ cc.dataSend = BytesCombine(cc.dataSend, header) cc.dataSend = BytesCombine(cc.dataSend, data) + if (len(cc.dataSend) + 1<<12) > mysql.MaxPayloadLen { + err = cc.handleStmtSyncPg(ctx) + cc.dataSend = cc.dataSend[:0] + return err + } return nil case 'F': /* fastpath function call */ case 'C': /* close */ @@ -278,35 +279,27 @@ func (cc *ClientConn) dispatchPg(ctx context.Context, header []byte, data []byte } err = cc.handleStmtClosePg(ctx, c) return err - case 'D': /* describe */ - cc.dataSend = BytesCombine(cc.dataSend, header) - cc.dataSend = BytesCombine(cc.dataSend, data) - return nil case 'H': /* flush */ // return cc.flush(ctx) case 'S': /* sync */ cc.dataSend = BytesCombine(cc.dataSend, header) cc.dataSend = BytesCombine(cc.dataSend, data) err = cc.handleStmtSyncPg(ctx) - cc.dataSend = make([]byte, 0) + cc.dataSend = cc.dataSend[:0] return err case 'X': /*Client Terminate*/ return io.EOF - case 'd': /* copy data */ - cc.dataSend = BytesCombine(cc.dataSend, header) - cc.dataSend = BytesCombine(cc.dataSend, data) - return nil case 'c': /* copy done */ cc.dataSend = BytesCombine(cc.dataSend, header) cc.dataSend = BytesCombine(cc.dataSend, data) err = cc.handleCopy(ctx) - cc.dataSend = make([]byte, 0) + cc.dataSend = cc.dataSend[:0] return err case 'f': /* copy fail */ cc.dataSend = BytesCombine(cc.dataSend, header) cc.dataSend = BytesCombine(cc.dataSend, data) err = cc.handleCopy(ctx) - cc.dataSend = make([]byte, 0) + cc.dataSend = cc.dataSend[:0] return err default: return errors.ErrFormat("command %d not supported now", cmd) @@ -597,20 +590,13 @@ func slicePgMsg(msg []byte) (res [][]byte) { // receive server connection msg, add deal with it func (cc *ClientConn) receiveBackendMsg(ctx context.Context) error { - - if os.Getenv(config.TimeCostFlag) == "true" { - c := timecost.TimeCost() - defer func() { - if golog.GetLevel() <= 3 { - golog.Warn("TIMECOST", "timecost", fmt.Sprintf("receiveBackendMsg time cost is %v", c()), cc.connectionId) - } - }() - } - header := make([]byte, 5) + var err error + cc.dataRecv = cc.dataRecv[:0] readloop: for { - data, err := cc.backendConn.Conn.ReadPgPacket(header) + cc.dataHeader = cc.dataHeader[:5] + cc.dataHeader, err = cc.backendConn.Conn.ReadPgPacket(cc.dataHeader) if err != nil { if golog.GetLevel() <= 4 { golog.Error(moduleName, "receiveBackendMsg", fmt.Sprintf("read packet from backend err: %s", err.Error()), cc.connectionId) @@ -618,30 +604,30 @@ readloop: return err } if golog.GetLevel() <= 0 { - golog.Trace(moduleName, "receiveBackendMsg", 
fmt.Sprintf("recv packet from backend msg type: %s", string(data[0])), cc.connectionId) + golog.Trace(moduleName, "receiveBackendMsg", fmt.Sprintf("recv packet from backend msg type: %s", string(cc.dataHeader[0])), cc.connectionId) } // deal with copy msg - if data[0] == 'G' || data[0] == 'W' { + if cc.dataHeader[0] == 'G' || cc.dataHeader[0] == 'W' { // in transaction cc.status &= ^mysql.SERVER_STATUS_AUTOCOMMIT cc.status |= mysql.SERVER_STATUS_IN_TRANS - cc.dataRecv = BytesCombine(cc.dataRecv, data) + cc.dataRecv = BytesCombine(cc.dataRecv, cc.dataHeader) cc.WriteData(cc.dataRecv) - cc.dataRecv = make([]byte, 0) + cc.dataRecv = cc.dataRecv[:0] break readloop } - if data[0] == 'H' { + if cc.dataHeader[0] == 'H' { cc.status &= ^mysql.SERVER_STATUS_AUTOCOMMIT cc.status |= mysql.SERVER_STATUS_IN_TRANS - cc.dataRecv = BytesCombine(cc.dataRecv, data) + cc.dataRecv = BytesCombine(cc.dataRecv, cc.dataHeader) cc.WriteData(cc.dataRecv) - cc.dataRecv = make([]byte, 0) + cc.dataRecv = cc.dataRecv[:0] continue } // add new protocol 'L' for read consistency - if data[0] == 'L' { + if cc.dataHeader[0] == 'L' { lsn := pgproto3.LsnResponse{} - lsn.Decode(data[5:]) + lsn.Decode(cc.dataHeader[5:]) addr := cc.backendConn.ConnPg.PgConn().Conn().RemoteAddr().String() if golog.GetLevel() <= 1 { golog.Debug("pg conn", "receiveBackendMsg", fmt.Sprintf("addr: %s, lsn: %d", addr, lsn.LSN), cc.connectionId) @@ -658,13 +644,13 @@ readloop: } // deal with msg for readForQuery. return msg - if data[0] == 'Z' { + if cc.dataHeader[0] == 'Z' { q := pgproto3.ReadyForQuery{} - q.Decode(data[5:]) + q.Decode(cc.dataHeader[5:]) // deal with 'begin-commit' statement, if begin-select will return 'T' for front, // means in transaction, actually backend not in transaction. Do sql with load balance if cc.beginFlag == BEGIN_PRESTART_COMMIT { - data = (&pgproto3.ReadyForQuery{TxStatus: 'T'}).Encode(nil) + cc.dataHeader = (&pgproto3.ReadyForQuery{TxStatus: 'T'}).Encode(nil) } else if cc.beginFlag == BEGIN_RELSTART_BEGIN { if q.TxStatus == 'I' && !cc.alwaysCurNode { cc.status = mysql.SERVER_STATUS_AUTOCOMMIT @@ -689,7 +675,7 @@ readloop: } } - cc.dataRecv = BytesCombine(cc.dataRecv, data) + cc.dataRecv = BytesCombine(cc.dataRecv, cc.dataHeader) err = cc.WriteData(cc.dataRecv) if err != nil { if golog.GetLevel() <= 4 { @@ -697,23 +683,23 @@ readloop: } } - cc.dataRecv = make([]byte, 0) + cc.dataRecv = cc.dataRecv[:0] break readloop } - if data[0] == 'E' { + if cc.dataHeader[0] == 'E' { if golog.GetLevel() <= 4 { - golog.Error(moduleName, "receiveBackendMsg", fmt.Sprintf("read err packet from backend: %s", string(data)), cc.connectionId) + golog.Error(moduleName, "receiveBackendMsg", fmt.Sprintf("read err packet from backend: %s", string(cc.dataHeader)), cc.connectionId) } } - cc.dataRecv = BytesCombine(cc.dataRecv, data) + cc.dataRecv = BytesCombine(cc.dataRecv, cc.dataHeader) // TODO At present, all data are returned. // In the future, we need to consider the situation of multiple data. 
// We need to set a threshold and return in batches - //if len(cc.dataRecv) > 16*1024 { - // cc.WriteData(cc.dataRecv) - // cc.dataRecv = make([]byte, 0) - //} + if (len(cc.dataRecv) + 1<<12) > mysql.MaxPayloadLen { + cc.WriteData(cc.dataRecv) + cc.dataRecv = cc.dataRecv[:0] + } } return nil } @@ -1167,12 +1153,16 @@ func (cc *ClientConn) readPacketPg() ([]byte, []byte, error) { if _, err := io.ReadFull(cc.pkg.Rb, header); err != nil { return nil, nil, err } - msgLen := binary.BigEndian.Uint32(header[1:]) - 4 - msg := make([]byte, msgLen) - if _, err := io.ReadFull(cc.pkg.Rb, msg); err != nil { + msgLen := int(binary.BigEndian.Uint32(header[1:]) - 4) + if msgLen > cap(cc.dataRecv){ + cc.dataRecv = make([]byte, msgLen) + }else { + cc.dataRecv = cc.dataRecv[:msgLen] + } + if _, err := io.ReadAtLeast(cc.pkg.Rb, cc.dataRecv, msgLen); err != nil { return header, nil, err } - return header, msg, nil + return header, cc.dataRecv, nil } // writeAuthenticationOK diff --git a/proxy/server/server.go b/proxy/server/server.go index d5efe21..63cf6c7 100644 --- a/proxy/server/server.go +++ b/proxy/server/server.go @@ -369,8 +369,9 @@ func (s *Server) newClientConn(co net.Conn) *ClientConn { c.beginFlag = BEGIN_UNSTART c.alwaysCurNode = false - c.dataSend = make([]byte, 0) - c.dataRecv = make([]byte, 0) + c.dataSend = make([]byte, 0, mysql.MaxPayloadLen) + c.dataRecv = make([]byte, 0, mysql.MaxPayloadLen) + c.dataHeader = make([]byte, 5, 8192) c.parseFlag = false return c -- Gitee From 480925e1f1da54b0a8c6b85cc45b5e83f43b34bd Mon Sep 17 00:00:00 2001 From: wangyao Date: Fri, 11 Nov 2022 19:07:06 +0800 Subject: [PATCH 4/7] improve: performance epollwait --- backend/backend_conn.go | 17 +- backend/balancer.go | 20 +- backend/db.go | 401 ++++++++++++++++++-------------- backend/node.go | 32 +-- backend/pg_prometheus_metric.go | 30 +-- cmd/he3proxy/main.go | 47 +++- config/config.go | 41 ++-- mysql/packetio.go | 2 +- proxy/server/conn_pgsql.go | 70 +++--- proxy/server/conn_preshard.go | 36 ++- proxy/server/conn_query.go | 7 +- proxy/server/server.go | 16 +- 12 files changed, 392 insertions(+), 327 deletions(-) diff --git a/backend/backend_conn.go b/backend/backend_conn.go index bfc19e3..f0a4641 100644 --- a/backend/backend_conn.go +++ b/backend/backend_conn.go @@ -22,7 +22,6 @@ import ( "fmt" "io" "net" - "os" "strings" "time" @@ -75,7 +74,7 @@ func (c *Conn) Connect(addr string, user string, password string, db string) err c.collation = mysql.DEFAULT_COLLATION_ID c.charset = mysql.DEFAULT_CHARSET // Determine whether PG or MySQL link - if config.Mysql == os.Getenv(config.DbTypeEnv) { + if config.Mysql == config.DbType { return c.ReConnect() } else { // postgresql @@ -164,11 +163,11 @@ func (c *Conn) ReConnectPg() error { // meaning that data is sent as soon as possible after a Write. //I set this option false. 
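
ReadPgPacket and readPacketPg above both read a 5-byte header (one message-type byte plus a big-endian uint32 length that counts itself) and then the payload, growing the reused buffer only when it is too small. A hedged sketch of that framing read against a plain io.Reader; readMessage and its buffer-growth policy are illustrative, not the proxy's Conn method:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// readMessage reads one PostgreSQL-style frame: a type byte, a 4-byte
// big-endian length that includes itself, then length-4 payload bytes.
// buf is reused as the payload buffer and grown only when it is too small.
func readMessage(r io.Reader, header, buf []byte) (msgType byte, payload []byte, err error) {
	if _, err = io.ReadFull(r, header[:5]); err != nil {
		return 0, nil, err
	}
	msgLen := int(binary.BigEndian.Uint32(header[1:5])) - 4
	if msgLen > cap(buf) {
		buf = make([]byte, msgLen)
	} else {
		buf = buf[:msgLen]
	}
	if _, err = io.ReadFull(r, buf); err != nil {
		return 0, nil, err
	}
	return header[0], buf, nil
}

func main() {
	// A 'Z' (ReadyForQuery) frame: length 5 = 4 length bytes + 1 status byte 'I'.
	frame := []byte{'Z', 0, 0, 0, 5, 'I'}
	header := make([]byte, 5)
	buf := make([]byte, 0, 16*1024)
	t, payload, err := readMessage(bytes.NewReader(frame), header, buf)
	fmt.Printf("type=%c payload=%s err=%v\n", t, payload, err) // type=Z payload=I err=<nil>
}

In this sketch the returned payload aliases buf, so it must be consumed before the next call reuses the buffer.
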
c.ConnPg.PgConn().Conn().(*net.TCPConn).SetNoDelay(false) - //c.ConnPg.PgConn().Conn().(*net.TCPConn).SetKeepAlive(true) + c.ConnPg.PgConn().Conn().(*net.TCPConn).SetKeepAlive(true) c.pkg = mysql.NewPacketIO(c.ConnPg.PgConn().Conn().(*net.TCPConn)) - c.recvData = make([]byte, 0, 16192) + c.recvData = make([]byte, 0, 16384) //if err := c.ConnPg.Ping(context.Background()); err != nil { // golog.Error("backend conn", "ReConnectPg", "pgx.Ping", 0, "urlExample", urlExample, "error", err.Error()) // c.ConnPg.Close(context.Background()) @@ -181,11 +180,11 @@ func (c *Conn) ReConnectPg() error { func (c *Conn) Close() error { if c.conn != nil { c.conn.Close() + c.conn = nil + c.salt = nil + c.pkgErr = nil + c.ConnPg.Close(context.Background()) } - c.conn = nil - c.salt = nil - c.pkgErr = nil - c.ConnPg.Close(context.Background()) return nil } @@ -399,7 +398,7 @@ func (c *Conn) writeCommandStrStr(command byte, arg1 string, arg2 string) error } func (c *Conn) Ping() error { - if config.Mysql == os.Getenv(config.DbTypeEnv) { + if config.Mysql == config.DbType { if err := c.writeCommand(mysql.COM_PING); err != nil { return err } diff --git a/backend/balancer.go b/backend/balancer.go index cc54cdc..9b10251 100644 --- a/backend/balancer.go +++ b/backend/balancer.go @@ -17,7 +17,6 @@ package backend import ( "fmt" "math/rand" - "os" "strings" "time" @@ -105,6 +104,9 @@ func (n *Node) getNextSlaveByWeight() (*DB, error) { db := n.Slave[index] n.LastSlaveIndex++ n.LastSlaveIndex = n.LastSlaveIndex % queueLen + if golog.GetLevel() <= golog.LevelDebug { + golog.Debug("balancer", "getNextSlaveByWeight", "selected slave node", 0, "addr", db.addr) + } return db, nil } @@ -115,7 +117,7 @@ func (n *Node) GetNextSlave() (*DB, error) { // Determine whether the LSN number meets the requirements // first get current db_table's LSN, if not exist will use node's LSN var masterLsn uint64 - if golog.GetLevel() <= 1 { + if golog.GetLevel() <= golog.LevelDebug { golog.Debug("balancer", "GetNextSlave", fmt.Sprintf("Node Cache Key: [%s]", n.NodeCacheKey), 0) } @@ -124,13 +126,13 @@ func (n *Node) GetNextSlave() (*DB, error) { val, _ = n.NodeLsn.Load(strings.Split(n.Master.addr, ":")[0]) masterLsn = val.(uint64) // parse string LSN to uint64 - if golog.GetLevel() <= 1 { + if golog.GetLevel() <= golog.LevelDebug { golog.Debug("balancer", "GetNextSlave", fmt.Sprintf("master node addr: [%s], 10hex lsn: [%d]", n.Master.addr, masterLsn), 0) } } else { masterLsn = val.(uint64) - if golog.GetLevel() <= 1 { + if golog.GetLevel() <= golog.LevelDebug { golog.Debug("balancer", "GetNextSlave", fmt.Sprintf("current use table [%s] latest LSN, 10hex lsn: [%d]", n.NodeCacheKey, masterLsn), 0) } @@ -141,7 +143,7 @@ func (n *Node) GetNextSlave() (*DB, error) { val, flag = n.NodeLsn.Load(strings.Split(db.addr, ":")[0]) if flag { parseUint := val.(uint64) - if golog.GetLevel() <= 1 { + if golog.GetLevel() <= golog.LevelDebug { golog.Debug("balancer", "GetNextSlave", fmt.Sprintf("slave node index: [%d], addr: [%s], 10hex lsn: [%d]", i, db.addr, parseUint), 0) } @@ -189,7 +191,7 @@ func (n *Node) GetNextSlave() (*DB, error) { return n.Slave[index], nil } else if n.Cfg.LoadBalanceMode == "cache" { // if use simple parse will use lex analysis, and can not get table name, so change to weight mode - if os.Getenv(config.SimpleParseFlag) == "true" { + if config.SimpleParse { return n.getNextSlaveByWeight() } // "cache" means switch cached node first @@ -214,7 +216,7 @@ func (n *Node) getNextSlaveByCache() []int { val, flag := 
n.NodeCache.Load(n.NodeCacheKey) if flag { tableCachedNodes := val.([]string) - if golog.GetLevel() <= 1 { + if golog.GetLevel() <= golog.LevelDebug { golog.Debug("balancer", "GetNextSlave", fmt.Sprintf("table cached key:%s, nodes: %s", n.NodeCacheKey, tableCachedNodes), 0) } if tableCachedNodes != nil && len(tableCachedNodes) > 0 { @@ -223,14 +225,14 @@ func (n *Node) getNextSlaveByCache() []int { //if cacheNode == strings.Split(db.addr, ":")[0] { // cacheNode ip:port if cacheNode == db.addr { - if golog.GetLevel() <= 1 { + if golog.GetLevel() <= golog.LevelDebug { golog.Debug("balancer", "GetNextSlave", fmt.Sprintf("cacheNode matched addr: %s", cacheNode), 0) } indexs = append(indexs, i) } } } - if golog.GetLevel() <= 1 { + if golog.GetLevel() <= golog.LevelDebug { golog.Debug("balancer", "GetNextSlave", fmt.Sprintf("cacheNode matched indexs: %v", indexs), 0) } } diff --git a/backend/db.go b/backend/db.go index 056528b..0d19a16 100644 --- a/backend/db.go +++ b/backend/db.go @@ -15,15 +15,10 @@ package backend import ( - "context" - "fmt" - "os" "sync" "sync/atomic" "time" - timecost "github.com/dablelv/go-huge-util" - "gitee.com/he3db/he3proxy/config" "gitee.com/he3db/he3proxy/core/golog" @@ -63,7 +58,6 @@ type DB struct { popConnCount int64 //pg - cacheConnsMap sync.Map cacheCountNum int32 currConnCount int64 //当前总链接数 maxPoolNum int32 //支持的最大db链接池 @@ -96,7 +90,7 @@ func Open(addr string, user string, password string, dbName string, maxConnNum i } //因pg链接通道与db绑定,所以只有mysql才在启动时设置链接池,pg链接池将在客户端链接请求进来时初始化 - if config.Mysql == os.Getenv(config.DbTypeEnv) { + if config.Mysql == config.DbType { db.idleConns = make(chan *Conn, db.maxConnNum) db.cacheConns = make(chan *Conn, db.maxConnNum) atomic.StoreInt32(&(db.state), Unknown) @@ -142,18 +136,47 @@ func (db *DB) State() string { } func (db *DB) ConnCount() (int, int, int64, int64) { - db.RLock() - defer db.RUnlock() - return len(db.idleConns), len(db.cacheConns), db.pushConnCount, db.popConnCount + if config.Mysql == config.DbType { + db.RLock() + defer db.RUnlock() + return len(db.idleConns), len(db.cacheConns), db.pushConnCount, db.popConnCount + } else { + db.RLock() + defer db.RUnlock() + var idleChannel, cacheChannel chan *Conn + val, flag := config.IdleConnsMap.Load(db.addr + db.user + db.db) + if flag { + idleChannel = val.(chan *Conn) + } + val1, flag1 := config.CacheConnsMap.Load(db.addr + db.user + db.db) + if flag1 { + cacheChannel = val1.(chan *Conn) + } + return len(idleChannel), len(cacheChannel), db.pushConnCount, db.popConnCount + } } func (db *DB) Close() error { - db.Lock() - idleChannel := db.idleConns - cacheChannel := db.cacheConns - db.cacheConns = nil - db.idleConns = nil - db.Unlock() + var idleChannel, cacheChannel chan *Conn + if config.Mysql == config.DbType { + db.Lock() + idleChannel = db.idleConns + cacheChannel = db.cacheConns + db.cacheConns = nil + db.idleConns = nil + db.Unlock() + } else { + val, flag := config.IdleConnsMap.Load(db.addr + db.user + db.db) + if flag { + idleChannel = val.(chan *Conn) + } + val1, flag1 := config.CacheConnsMap.Load(db.addr + db.user + db.db) + if flag1 { + cacheChannel = val1.(chan *Conn) + } + config.IdleConnsMap.Delete(db.addr + db.user + db.db) + config.CacheConnsMap.Delete(db.addr + db.user + db.db) + } if cacheChannel == nil || idleChannel == nil { return nil } @@ -168,24 +191,53 @@ func (db *DB) Close() error { } func (db *DB) getConns() (chan *Conn, chan *Conn) { - db.RLock() - cacheConns := db.cacheConns - idleConns := db.idleConns - db.RUnlock() + var cacheConns, 
idleConns chan *Conn + if config.Mysql == config.DbType { + db.RLock() + cacheConns = db.cacheConns + idleConns = db.idleConns + db.RUnlock() + } else { + val, flag := config.IdleConnsMap.Load(db.addr + db.user + db.db) + if flag { + idleConns = val.(chan *Conn) + } + val1, flag1 := config.CacheConnsMap.Load(db.addr + db.user + db.db) + if flag1 { + cacheConns = val1.(chan *Conn) + } + } + return cacheConns, idleConns } func (db *DB) getCacheConns() chan *Conn { - db.RLock() - conns := db.cacheConns - db.RUnlock() + var conns chan *Conn + if config.Mysql == config.DbType { + db.RLock() + conns = db.cacheConns + db.RUnlock() + } else { + val1, flag1 := config.CacheConnsMap.Load(db.addr + db.user + db.db) + if flag1 { + conns = val1.(chan *Conn) + } + } return conns } func (db *DB) getIdleConns() chan *Conn { - db.RLock() - conns := db.idleConns - db.RUnlock() + var conns chan *Conn + if config.Mysql == config.DbType { + db.RLock() + conns = db.idleConns + db.RUnlock() + } else { + val, flag := config.IdleConnsMap.Load(db.addr + db.user + db.db) + if flag { + conns = val.(chan *Conn) + } + } return conns } @@ -229,17 +281,51 @@ func (db *DB) newConn(user string) (*Conn, error) { } func (db *DB) addIdleConn() { - conn := new(Conn) - select { - case db.idleConns <- conn: - default: - break + if config.Mysql == config.DbType { + conn := new(Conn) + select { + case db.idleConns <- conn: + default: + break + } + } else { + val, flag := config.IdleConnsMap.Load(db.addr + db.user + db.db) + if flag { + conns := val.(chan *Conn) + conn := new(Conn) + if conns != nil { + select { + case conns <- conn: + default: + break + } + } + } } } func (db *DB) closeConn(co *Conn) error { atomic.AddInt64(&db.pushConnCount, 1) - if config.Mysql == os.Getenv(config.DbTypeEnv) { + if co != nil { + co.Close() + conns := db.getIdleConns() + if conns != nil { + select { + case conns <- co: + return nil + default: + return nil + } + } + } else { + db.addIdleConn() + } + + return nil +} + +func (db *DB) closeConnNotAdd(co *Conn) error { + if config.DbType == config.Mysql { if co != nil { co.Close() conns := db.getIdleConns() @@ -256,50 +342,27 @@ func (db *DB) closeConn(co *Conn) error { } } else { if co != nil { - // If the user link is not cached in the connection pool, it will be released directly. - // If the link is cached, the current link will be released and a new link will be generated to join the pool. 
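
This patch also moves the per-database pools out of each DB value into the package-level config.CacheConnsMap and config.IdleConnsMap, keyed by addr+user+db, so any code path can look up the same channel-backed pool. A minimal sketch of a sync.Map keyed this way; it uses LoadOrStore for race-safe lazy creation, which is a simplification rather than the patch's lock-guarded InitConnPoolPg:

package main

import (
	"fmt"
	"sync"
)

type conn struct{ id int }

var pools sync.Map // key: addr+user+db -> chan *conn

// getPool returns the pool for a key, creating it once; LoadOrStore keeps the
// creation race-safe even when many sessions hit the same key concurrently.
func getPool(addr, user, db string, size int) chan *conn {
	key := addr + user + db
	if v, ok := pools.Load(key); ok {
		return v.(chan *conn)
	}
	v, _ := pools.LoadOrStore(key, make(chan *conn, size))
	return v.(chan *conn)
}

func main() {
	p := getPool("127.0.0.1:5432", "he3proxy", "postgres", 16)
	p <- &conn{id: 1}
	c := <-p
	fmt.Println("popped conn", c.id, "pool cap", cap(p))
}
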
- // The number of connections in the maintenance pool will remain unchanged - val, flag := db.cacheConnsMap.Load(db.user + db.db) - if !flag { - atomic.AddInt64(&db.currConnCount, -1) - } else { + co.Close() + val, flag := config.IdleConnsMap.Load(db.addr + db.user + db.db) + if flag { conns := val.(chan *Conn) - if len(conns) == db.InitConnNum { - atomic.AddInt64(&db.currConnCount, -1) - } else { - conn, err := db.newConn(db.user) - if err != nil { + if conns != nil { + select { + case conns <- co: + return nil + default: return nil } - conns <- conn - atomic.AddInt64(&db.pushConnCount, 1) } } - co.Close() + } else { + db.addIdleConn() } } return nil } -func (db *DB) closeConnNotAdd(co *Conn) error { - if co != nil { - co.Close() - conns := db.getIdleConns() - if conns != nil { - select { - case conns <- co: - return nil - default: - return nil - } - } - } else { - db.addIdleConn() - } - return nil -} - func (db *DB) tryReuse(co *Conn) error { var err error //reuse Connection @@ -332,14 +395,6 @@ func (db *DB) tryReuse(co *Conn) error { } func (db *DB) PopConn() (*Conn, error) { - if os.Getenv(config.TimeCostFlag) == "true" { - c := timecost.TimeCost() - defer func() { - if golog.GetLevel() <= 3 { - golog.Warn("TIMECOST", "timecost", fmt.Sprintf("PopConn time cost is %v", c()), 0) - } - }() - } var co *Conn var err error @@ -364,29 +419,29 @@ func (db *DB) PopConn() (*Conn, error) { return co, nil } -func (db *DB) InitConnPoolPg(dbname string, dbuser string) (err error) { +func (db *DB) InitConnPoolPg(dbname string, dbuser string) (cacheConns chan *Conn, err error) { //判断是否关闭链接池 默认开启 - if os.Getenv(config.ConnPoolSwitch) == "false" { + if !config.ConnPool { return } - var cacheConns chan *Conn db.Lock() defer db.Unlock() db.db = dbname db.user = dbuser - _, flag := db.cacheConnsMap.Load(dbuser + dbname) + _, flag := config.CacheConnsMap.Load(db.addr + dbuser + dbname) if !flag { if db.IsExceedMaxConns() { err = errors.ErrConnIsFull - return err + return cacheConns, err } //判断是否超过最大链接池数量 if db.cacheCountNum >= db.maxPoolNum { err = errors.ErrMaxPoolIsFull - return err + return cacheConns, err } //InitConnNum 根据配置文件配置而来;默认值16 cacheConns = make(chan *Conn, db.InitConnNum) + idleConns := make(chan *Conn, db.InitConnNum) atomic.StoreInt32(&(db.state), Unknown) //循环建立链接时间较慢 暂且使用协程处理; @@ -404,19 +459,14 @@ func (db *DB) InitConnPoolPg(dbname string, dbuser string) (err error) { atomic.AddInt64(&db.pushConnCount, 1) atomic.AddInt64(&db.currConnCount, 1) } - db.cacheConnsMap.Store(dbuser+dbname, cacheConns) + config.CacheConnsMap.Store(db.addr + dbuser+dbname, cacheConns) + config.IdleConnsMap.Store(db.addr + dbuser+dbname, idleConns) atomic.AddInt32(&db.cacheCountNum, 1) } - return nil + return cacheConns, nil } func (db *DB) PopConnPg(dbname string, dbuser string) (*Conn, error) { - if os.Getenv(config.TimeCostFlag) == "true" { - c := timecost.TimeCost() - defer func() { - golog.Warn("TIMECOST", "timecost", fmt.Sprintf("handleQueryPg time cost is %v", c()), 0) - }() - } var co *Conn var err error @@ -424,7 +474,7 @@ func (db *DB) PopConnPg(dbname string, dbuser string) (*Conn, error) { db.user = dbuser //判断是否关闭链接池 默认开启 - if os.Getenv(config.ConnPoolSwitch) == "false" { + if !config.ConnPool { //返回一个链接 co, err = db.newConn(dbuser) if err != nil { @@ -432,32 +482,16 @@ func (db *DB) PopConnPg(dbname string, dbuser string) (*Conn, error) { return nil, err } } else { - var cacheConns chan *Conn - val, flag := db.cacheConnsMap.Load(dbuser + dbname) - if flag { - cacheConns = val.(chan *Conn) - } 
else { - err = db.InitConnPoolPg(dbname, dbuser) - if err != nil { - return nil, err - } + cacheConns, idleConns := db.getConns() + if cacheConns == nil || idleConns == nil { + return nil, errors.ErrDatabaseClose } co = db.GetConnFromCache(cacheConns) if co == nil { - golog.Warn("db", "PopConnPg", "conn is nil", 0) - // 需要判断是否大于最大链接数 - if db.IsExceedMaxConns() { - err = errors.ErrConnIsFull - return nil, err - } - //返回一个链接 - co, err = db.newConn(dbuser) + co, err = db.GetConnFromIdle(cacheConns, idleConns) if err != nil { - db.Close() return nil, err } - atomic.AddInt64(&db.pushConnCount, 1) - atomic.AddInt64(&db.currConnCount, 1) } } @@ -507,31 +541,69 @@ func (db *DB) GetConnFromCache(cacheConns chan *Conn) *Conn { func (db *DB) GetConnFromIdle(cacheConns, idleConns chan *Conn) (*Conn, error) { var co *Conn var err error - select { - case co = <-idleConns: - atomic.AddInt64(&db.popConnCount, 1) - co, err := db.newConn("") - if err != nil { - db.closeConn(co) - return nil, err - } - err = co.Ping() - if err != nil { - db.closeConn(co) - return nil, errors.ErrBadConn - } - return co, nil - case co = <-cacheConns: - atomic.AddInt64(&db.popConnCount, 1) - if co == nil { - return nil, errors.ErrConnIsNil + if config.DbType == config.Mysql { + select { + case co = <-idleConns: + atomic.AddInt64(&db.popConnCount, 1) + co, err := db.newConn("") + if err != nil { + db.closeConn(co) + return nil, err + } + err = co.Ping() + if err != nil { + db.closeConn(co) + return nil, errors.ErrBadConn + } + return co, nil + case co = <-cacheConns: + atomic.AddInt64(&db.popConnCount, 1) + if co == nil { + return nil, errors.ErrConnIsNil + } + if co != nil && PingPeroid < time.Now().Unix()-co.pushTimestamp { + err = co.Ping() + if err != nil { + db.closeConn(co) + return nil, errors.ErrBadConn + } + } } - if co != nil && PingPeroid < time.Now().Unix()-co.pushTimestamp { + } else { + select { + case co = <-idleConns: + atomic.AddInt64(&db.popConnCount, 1) + golog.Warn("db", "PopConnPg", "conn is nil", 0, "addr", db.addr, + "cacheConns len", len(cacheConns), "dbuser+dbname", db.user+db.db) + // 需要判断是否大于最大链接数 + if db.IsExceedMaxConns() { + err = errors.ErrConnIsFull + return nil, err + } + //返回一个链接 + co, err = db.newConn(db.user) + if err != nil { + db.closeConn(co) + return nil, err + } err = co.Ping() if err != nil { db.closeConn(co) return nil, errors.ErrBadConn } + return co, nil + case co = <-cacheConns: + atomic.AddInt64(&db.popConnCount, 1) + if co == nil { + return nil, errors.ErrConnIsNil + } + if co != nil && PingPeroid < time.Now().Unix()-co.pushTimestamp { + err = co.Ping() + if err != nil { + db.closeConn(co) + return nil, errors.ErrBadConn + } + } } } return co, nil @@ -541,7 +613,7 @@ func (db *DB) PushConnForExtendedProtocol(co *Conn, err error) { if co == nil { return } - val, flag := db.cacheConnsMap.Load(db.user + db.db) + val, flag := config.CacheConnsMap.Load(db.addr + db.user + db.db) // 因后端链接未销毁 到时扩展协议时报错: prepared statement "lrupsc_1_0" already exists (SQLSTATE 42P05) // 如果成功创建了一个命名的预备语句对象,那么它将持续到当前会话结束, 除非被明确地删除 (暂未实现 需要采用close命令关闭),现在是直接关闭了链接 // http://www.postgres.cn/docs/14/protocol-message-formats.html @@ -567,55 +639,26 @@ func (db *DB) PushConnForExtendedProtocol(co *Conn, err error) { func (db *DB) PushConn(co *Conn, err error) { atomic.AddInt64(&db.pushConnCount, 1) - if config.Mysql == os.Getenv(config.DbTypeEnv) { - if co == nil { - db.addIdleConn() - return - } - conns := db.getCacheConns() - if conns == nil { - co.Close() - return - } - if err != nil { - db.closeConnNotAdd(co) 
- return - } - co.pushTimestamp = time.Now().Unix() - select { - case conns <- co: - return - default: - db.closeConnNotAdd(co) - return - } - } else { - if co == nil { - return - } - val, flag := db.cacheConnsMap.Load(db.user + db.db) - if !flag || err != nil { - co.Close() - atomic.AddInt64(&db.currConnCount, -1) - return - } else { - conns := val.(chan *Conn) - if len(conns) == db.InitConnNum { - co.Close() - atomic.AddInt64(&db.currConnCount, -1) - return - } else { - co.pushTimestamp = time.Now().Unix() - select { - case conns <- co: - return - default: - co.Close() - atomic.AddInt64(&db.currConnCount, -1) - return - } - } - } + if co == nil { + db.addIdleConn() + return + } + conns := db.getCacheConns() + if conns == nil { + co.Close() + return + } + if err != nil { + db.closeConnNotAdd(co) + return + } + co.pushTimestamp = time.Now().Unix() + select { + case conns <- co: + return + default: + db.closeConnNotAdd(co) + return } } @@ -650,10 +693,6 @@ func (p *BackendConn) Close() { //data = reset.Encode(data) //err := p.Conn.WritePgPacket(data) //p.Conn.ReadPgPacket() - _, err := p.Conn.ConnPg.Exec(context.Background(), "RESET ALL") - if err != nil { - golog.Warn("db", "Close", "write close parse msg err: "+err.Error(), '0') - } p.db.PushConn(p.Conn, nil) } p.Conn = nil diff --git a/backend/node.go b/backend/node.go index ab5fe7c..f76d410 100644 --- a/backend/node.go +++ b/backend/node.go @@ -17,7 +17,6 @@ package backend import ( "context" "fmt" - "os" "strconv" "strings" "sync" @@ -116,7 +115,7 @@ func (n *Node) checkMasterConnsCache(min int) { //回收空闲超时链接 func dealConnsCache(db *DB, min int) { if db.cacheCountNum != 0 { - db.cacheConnsMap.Range(func(key, value interface{}) bool { + config.CacheConnsMap.Range(func(key, value interface{}) bool { conns := value.(chan *Conn) golog.Debug("Node", "checkMasterConnsCache", " clean conn cache while timeout ", 0, @@ -142,7 +141,8 @@ func dealConnsCache(db *DB, min int) { co.Close() atomic.AddInt64(&db.currConnCount, -1) } - db.cacheConnsMap.Delete(key) + config.CacheConnsMap.Delete(key) + config.IdleConnsMap.Delete(key) atomic.AddInt32(&db.cacheCountNum, -1) } else { conns <- conn @@ -239,7 +239,7 @@ func (n *Node) checkMaster() { atomic.StoreInt32(&(db.state), Up) } // loop for system_table pg_stat_replication & pg_hot_data - if config.Mysql != os.Getenv(config.DbTypeEnv) { + if config.Mysql != config.DbType { // get master and read-only nodes latest LSN in loop // get cached tables relationship with nodes in loop if n.Cfg.LoadBalanceMode == "lsn" || n.Cfg.LoadBalanceMode == "cache" { @@ -367,7 +367,7 @@ func (n *Node) DeleteSlave(addr string) error { func (n *Node) OpenDB(addr string) (*DB, error) { db, err := Open(addr, n.Cfg.User, n.Cfg.Password, "", n.Cfg.MaxConnNum) if err == nil { - if config.Mysql != os.Getenv(config.DbTypeEnv) && 0 < n.Cfg.InitConnCount { + if config.Mysql != config.DbType && 0 < n.Cfg.InitConnCount { db.InitConnNum = n.Cfg.InitConnCount if n.Cfg.MaxPoolNum > 0 { db.maxPoolNum = n.Cfg.MaxPoolNum @@ -463,7 +463,7 @@ func (n *Node) getLsnAndCacheMetadata(conn *Conn, masterStr string) { if conn == nil || conn.ConnPg.Ping(context.Background()) != nil { n.Master, err = n.OpenDB(masterStr) if err != nil { - if golog.GetLevel() <= 4 { + if golog.GetLevel() <= golog.LevelError { golog.Error("node", "getLsnAndCacheMetadata", fmt.Sprintf("open master db err : %s, addr: %s", err.Error(), masterStr), 0) } @@ -476,7 +476,7 @@ func (n *Node) getLsnAndCacheMetadata(conn *Conn, masterStr string) { // init read-only node's LSN rows, 
er := conn.ConnPg.Query(context.Background(), "select client_addr::text, replay_lsn from pg_stat_replication;") if er != nil { - if golog.GetLevel() <= 4 { + if golog.GetLevel() <= golog.LevelError { golog.Error("node", "ParseMaster", fmt.Sprintf("Query pg_stat_replication failed: %s", er.Error()), 0) } } @@ -485,21 +485,21 @@ func (n *Node) getLsnAndCacheMetadata(conn *Conn, masterStr string) { for rows.Next() { er = rows.Scan(&addr, &lsn) if er != nil { - if golog.GetLevel() <= 4 { + if golog.GetLevel() <= golog.LevelError { golog.Error("node", "ParseMaster", fmt.Sprintf("scan client_addr,replay_lsn err : %s", er.Error()), 0) } break } if val, err := pgLsnInInternal(lsn); err != nil { - if golog.GetLevel() <= 4 { + if golog.GetLevel() <= golog.LevelError { golog.Error("node", "ParseMaster", fmt.Sprintf("slave node parse LSN 16hex to 10hex err : %s, addr: %s", err.Error(), addr), 0) } } else { n.NodeLsn.Store(strings.Split(addr, "/")[0], val) - if golog.GetLevel() <= 0 { + if golog.GetLevel() <= golog.LevelTrace { golog.Trace("node", "ParseMaster", fmt.Sprintf("slave node parse LSN, addr: %s, lsn 16hex: %s, lsn 10hex: %d ", addr, lsn, val), 0) } @@ -511,13 +511,13 @@ func (n *Node) getLsnAndCacheMetadata(conn *Conn, masterStr string) { golog.Error("node", "ParseMaster", "scan pg_current_wal_lsn err !"+er.Error(), 0) } else { if val, err := pgLsnInInternal(lsn); err != nil { - if golog.GetLevel() <= 4 { + if golog.GetLevel() <= golog.LevelError { golog.Error("node", "ParseMaster", fmt.Sprintf("master node parse LSN 16hex to 10hex err : %s, addr: %s", err.Error(), masterStr), 0) } } else { n.NodeLsn.Store(strings.Split(masterStr, ":")[0], val) - if golog.GetLevel() <= 0 { + if golog.GetLevel() <= golog.LevelTrace { golog.Trace("node", "ParseMaster", fmt.Sprintf("master node parse LSN, addr: %s, lsn 16hex: %s , lsn 10hex: %d", masterStr, lsn, val), 0) } @@ -526,7 +526,7 @@ func (n *Node) getLsnAndCacheMetadata(conn *Conn, masterStr string) { // init metadata about table which node cached rows, er = conn.ConnPg.Query(context.Background(), "select datname, relname, clientaddr from pg_hot_data;") if er != nil { - if golog.GetLevel() <= 4 { + if golog.GetLevel() <= golog.LevelError { golog.Error("node", "ParseMaster", fmt.Sprintf("Query pg_hot_data failed: %s", er.Error()), 0) } } @@ -541,12 +541,12 @@ func (n *Node) getLsnAndCacheMetadata(conn *Conn, masterStr string) { for rows.Next() { er = rows.Scan(&datname, &relname, &clientaddr) if er != nil { - if golog.GetLevel() <= 4 { + if golog.GetLevel() <= golog.LevelError { golog.Error("node", "ParseMaster", fmt.Sprintf("scan datname, relname err : %s", er.Error()), 0) } break } - if golog.GetLevel() <= 0 { + if golog.GetLevel() <= golog.LevelTrace { golog.Trace("node", "ParseMaster", fmt.Sprintf("datname: %s, relname: %s, client_addr: %s", datname, relname, clientaddr), 0) } val, flag := n.NodeCache.Load(datname + "_" + relname) @@ -624,7 +624,7 @@ func (n *Node) ParseSlave(slaveStr string) error { } n.InitBalancer() - if os.Getenv(config.MetricsFlag) != "false" && (n.Cfg.LoadBalanceMode == "metric" || n.Cfg.LoadBalanceMode == "lsn") { + if config.Metrics && (n.Cfg.LoadBalanceMode == "metric" || n.Cfg.LoadBalanceMode == "lsn") { n.BestNodeIndexByMetric = -1 go n.saveBestNodeIndex() } diff --git a/backend/pg_prometheus_metric.go b/backend/pg_prometheus_metric.go index 3dc3152..3f227b7 100644 --- a/backend/pg_prometheus_metric.go +++ b/backend/pg_prometheus_metric.go @@ -72,33 +72,33 @@ func CalculateAndChooseBestNode(cfg *config.NodeConfig, 
nodeLoad map[int]int) (i } for i := 0; i < len(cfg.MonitorPgNodes); i++ { - if golog.GetLevel() <= 0 { + if golog.GetLevel() <= golog.LevelTrace { golog.Trace("pg prometheus metric", "CalculateAndChooseBestNode", fmt.Sprintf("handle info with node: %s \n", cfg.MonitorPgNodes[i]), 0) } connNum, err := getCurrentConnections(pgPrometheusUrl, pgExporterSlice[i], cfg.PgExporterName) if err != nil { - if golog.GetLevel() <= 4 { + if golog.GetLevel() <= golog.LevelError { golog.Error("pg prometheus metric", "CalculateAndChooseBestNode", fmt.Sprintf("Get current connections failed: %s \n", err.Error()), 0) } return -1, nil, errors.New(fmt.Sprintf("Get current connections failed: %s \n", err.Error())) } - if golog.GetLevel() <= 0 { + if golog.GetLevel() <= golog.LevelTrace { golog.Trace("pg prometheus metric", "CalculateAndChooseBestNode", fmt.Sprintf("current connection num: %d \n", connNum), 0) } cpuIdle, err := getCpuIdle(pgPrometheusUrl, nodeExporterSlice[i], cfg.NodeExporterName, cfg.NodeCpuMode, cfg.TimeInterval) if err != nil { - if golog.GetLevel() <= 4 { + if golog.GetLevel() <= golog.LevelError { golog.Error("pg prometheus metric", "CalculateAndChooseBestNode", fmt.Sprintf("Get current cpu idle failed: %s \n", err.Error()), 0) } return -1, nil, errors.New(fmt.Sprintf("Get current cpu idle failed: %s \n", err.Error())) } - if golog.GetLevel() <= 0 { + if golog.GetLevel() <= golog.LevelTrace { golog.Trace("pg prometheus metric", "CalculateAndChooseBestNode", fmt.Sprintf("current cpu idle: %d \n", cpuIdle), 0) } @@ -106,14 +106,14 @@ func CalculateAndChooseBestNode(cfg *config.NodeConfig, nodeLoad map[int]int) (i memIdle, err := getMemIdle(pgPrometheusUrl, nodeExporterSlice[i], cfg.NodeExporterName) if err != nil { - if golog.GetLevel() <= 4 { + if golog.GetLevel() <= golog.LevelError { golog.Error("pg prometheus metric", "CalculateAndChooseBestNode", fmt.Sprintf("Get current mem idle failed: %s \n", err.Error()), 0) } return -1, nil, errors.New(fmt.Sprintf("Get current mem idle failed: %s \n", err.Error())) } - if golog.GetLevel() <= 0 { + if golog.GetLevel() <= golog.LevelTrace { golog.Trace("pg prometheus metric", "CalculateAndChooseBestNode", fmt.Sprintf("current mem idle: %d \n", memIdle), 0) } @@ -121,13 +121,13 @@ func CalculateAndChooseBestNode(cfg *config.NodeConfig, nodeLoad map[int]int) (i ioUtilization, err := getDiskIOUtilization(pgPrometheusUrl, nodeExporterSlice[i], cfg.NodeExporterName, cfg.PgDataDiskName, cfg.TimeInterval) if err != nil { - if golog.GetLevel() <= 4 { + if golog.GetLevel() <= golog.LevelError { golog.Error("pg prometheus metric", "CalculateAndChooseBestNode", fmt.Sprintf("Get current disk io utilization failed: %s \n", err.Error()), 0) } return -1, nil, errors.New(fmt.Sprintf("Get current disk io utilization failed: %s \n", err.Error())) } - if golog.GetLevel() <= 0 { + if golog.GetLevel() <= golog.LevelTrace { golog.Trace("pg prometheus metric", "CalculateAndChooseBestNode", fmt.Sprintf("current disk io utilization: %d \n", ioUtilization), 0) } @@ -137,7 +137,7 @@ func CalculateAndChooseBestNode(cfg *config.NodeConfig, nodeLoad map[int]int) (i index = i maxScore = score } - if golog.GetLevel() <= 0 { + if golog.GetLevel() <= golog.LevelTrace { golog.Trace("pg prometheus metric", "CalculateAndChooseBestNode", fmt.Sprintf("current score is %d, max score is %d \n", score, maxScore), 0) } @@ -152,7 +152,7 @@ func getCurrentConnections(prometheusUrl string, pgInstanceLable string, pgJobLa Param("query", 
"sum(pg_stat_database_numbackends{instance=\""+pgInstanceLable+"\",job=\""+pgJobLable+"\"})"). EndStruct(&metric) if errs != nil || resp.StatusCode > 300 || resp.StatusCode < 200 { - if golog.GetLevel() <= 4 { + if golog.GetLevel() <= golog.LevelError { golog.Error("pg prometheus metric", "getCurrentConnections", fmt.Sprintf("%v %v", resp, errs), 0) } @@ -173,7 +173,7 @@ func getCpuIdle(prometheusUrl string, nodeInstanceLable string, nodeJobLable str "\",job=\""+nodeJobLable+"\"}["+timeInterval+"]))*100"). EndStruct(&metric) if errs != nil || resp.StatusCode > 300 || resp.StatusCode < 200 { - if golog.GetLevel() <= 4 { + if golog.GetLevel() <= golog.LevelError { golog.Error("pg prometheus metric", "getCpuIdle", fmt.Sprintf("%v %v", resp, errs), 0) } @@ -192,7 +192,7 @@ func getMemIdle(prometheusUrl string, nodeInstanceLable string, nodeJobLable str "\",job=\""+nodeJobLable+"\"}/node_memory_MemTotal_bytes{instance=\""+nodeInstanceLable+"\",job=\""+nodeJobLable+"\"}*100"). EndStruct(&metric) if errs != nil || resp.StatusCode > 300 || resp.StatusCode < 200 { - if golog.GetLevel() <= 4 { + if golog.GetLevel() <= golog.LevelError { golog.Error("pg prometheus metric", "getMemIdle", fmt.Sprintf("%v %v", resp, errs), 0) } @@ -210,7 +210,7 @@ func getDiskIOUtilization(prometheusUrl string, nodeInstanceLable string, nodeJo "\",job=\""+nodeJobLable+"\",device=~\""+diskDevice+"\"}["+timeInterval+"])*100"). EndStruct(&metric) if errs != nil || resp.StatusCode > 300 || resp.StatusCode < 200 { - if golog.GetLevel() <= 4 { + if golog.GetLevel() <= golog.LevelError { golog.Error("pg prometheus metric", "getDiskIOUtilization", fmt.Sprintf("%v %v", resp, errs), 0) } @@ -224,7 +224,7 @@ func handleResult(metric Prometheus) (int, error) { defer func() { err := recover() if err != nil { - if golog.GetLevel() <= 4 { + if golog.GetLevel() <= golog.LevelError { golog.Error("pg prometheus metric", "handleResult", fmt.Sprintf("%v", err), 0) } diff --git a/cmd/he3proxy/main.go b/cmd/he3proxy/main.go index dee01ad..a1084a7 100644 --- a/cmd/he3proxy/main.go +++ b/cmd/he3proxy/main.go @@ -41,7 +41,7 @@ import ( var configFile = flag.String("config", "./etc/he3proxy.yaml", "He3Proxy config file") // Define log level parameters -var logLevel = flag.String("log-level", "", "log level [debug|info|warn|error], default error") +var logLevel = flag.String("log-level", "", "log level [debug|info|warn|error], default set by configuration") // Define version parameters var version = flag.Bool("v", false, "the version of He3Proxy") @@ -52,6 +52,24 @@ var pprof = flag.Bool("pprof", false, "the switch of pprof") // Define pyroscope parameters var pyro = flag.Bool("pyroscope", false, "the switch of pyroscope") +var dbMode = flag.String("db-mode", "postgresql", "switch proxy work mode [mysql|postgresql], default postgresql") + +var serverVersion = flag.String("server-version", "14.2", "set backend server version for pg, default 14.2") + +var singleSession = flag.Bool("single-session", false, "the switch of single session, if true means proxy work without loadbalance") + +var connectionPool = flag.Bool("conn-pool", true, "the switch of connection pool, if true means use backend connection pool") + +var he3proxyFlag = flag.Bool("he3proxy", false, "the flag of he3proxy, if true means tell backend current connection is by he3proxy") + +var metricsFlag = flag.Bool("metrics", false, "the flag of collection metrics, if true means work with prometheus") + +var cancelRequest = flag.Bool("cancel-request", false, "the flag of save connection id 
and secret for cancel request") + +var simpleParse = flag.Bool("simple-parse", true, "the flag of use simple parse (lex), if false means will use crdb parse (AST)") + +var cpuNum = flag.Int("max-cpu", 0, "the GO MAX PROCS, default sys logical cpu nums") + const ( sqlLogName = "sql.log" sysLogName = "sys.log" @@ -79,13 +97,30 @@ func main() { defer stopper.Stop() fmt.Print(banner) - fmt.Printf("set runtime GOMAXPROCS: %d \n", runtime.NumCPU()) - runtime.GOMAXPROCS(runtime.NumCPU()) // Get parameters output in CMD flag.Parse() // Output version information and compilation time are defined in makefile fmt.Printf("Git commit:%s\n", BuildVersion) fmt.Printf("Build time:%s\n", BuildDate) + if *cpuNum == 0 { + runtime.GOMAXPROCS(runtime.NumCPU()) + fmt.Printf("set runtime GOMAXPROCS: %d \n", runtime.NumCPU()) + } else { + runtime.GOMAXPROCS(*cpuNum) + fmt.Printf("set runtime GOMAXPROCS: %d \n", *cpuNum) + } + + // get config from env + config.DbType = *dbMode + config.ConnPool = *connectionPool + config.ServerVersion = *serverVersion + config.SingleSession = *singleSession + config.ConnPool = *connectionPool + config.He3Proxy = *he3proxyFlag + config.Metrics = *metricsFlag + config.CancelReq = *cancelRequest + config.SimpleParse = *simpleParse + // If there is a version parameter, - V returns directly if *version { return @@ -240,7 +275,7 @@ func main() { golog.Info("main", "main", "Got update config signal", 0) newCfg, err := config.ParseConfigFile(*configFile) if err != nil { - if golog.GetLevel() <= 4 { + if golog.GetLevel() <= golog.LevelError { golog.Error("main", "main", fmt.Sprintf("parse config file error:%s", err.Error()), 0) } } else { @@ -253,9 +288,9 @@ func main() { go apiSvr.Run() // start prometheus go prometheusSvr.Run() - svr.Run(os.Getenv(config.DbTypeEnv)) + svr.Run(config.DbType) golog.Info("server", "onConn", "server working at "+ - os.Getenv(config.DbTypeEnv)+" mode", 0) + config.DbType+" mode", 0) } func setLogLevel(level string) { diff --git a/config/config.go b/config/config.go index a02ded7..25b25e4 100644 --- a/config/config.go +++ b/config/config.go @@ -16,6 +16,7 @@ package config import ( "io/ioutil" + "sync" "gopkg.in/yaml.v2" ) @@ -23,22 +24,32 @@ import ( // Used to save the configuration through the API var configFileName string +//Used to save connection mode, mysql or pg +var DbType string + +//Used to save connection pool switch +var ConnPool bool +var SingleSession bool +var He3Proxy bool + +//if false will close metrics collection, default true +var Metrics bool + +//if false will not store client connection Id, means cancel request send cmd will invalidate, default true +var CancelReq bool + +// if true, change crdb parse to kingshard lex analysis +var SimpleParse bool +var ServerVersion string + +var CacheConnsMap sync.Map + +var IdleConnsMap sync.Map + const ( - DbTypeEnv = "DB_TYPE" - ServerVersionEnv = "SERVER_VERSION" - SingleSession = "SINGLE_SESSION" - Mysql = "mysql" - PG = "postgresql" - ConnPoolSwitch = "CONNS_POOL_SWITCH" - DefaultHe3User = "he3proxy" - ConnFlag = "HE3PROXY_FLAG" - //if false will close metrics collection, default true - MetricsFlag = "HE3PROXY_METRICS_FLAG" - //if false will not store client connection Id, means cancel request send cmd will invalidate, default true - CancelReqFlag = "HE3PROXY_CANCEL_REQUEST_FLAG" - TimeCostFlag = "HE3PROXY_TIME_COST_FLAG" - // if true, change crdb parse to kingshard lex analysis - SimpleParseFlag = "HE3PROXY_SIMPLE_PARSE_FLAG" + Mysql = "mysql" + PG = "postgresql" + DefaultHe3User = "he3proxy" ) 
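
main.go now parses these settings once as command-line flags and stores them in exported config variables, so hot paths read a plain bool or string instead of calling os.Getenv per query. A small sketch of that startup pattern; the flag names mirror the ones above, but the config layout is illustrative:

package main

import (
	"flag"
	"fmt"
)

// Package-level settings, written once in main after flag.Parse and then only
// read; this avoids repeated environment lookups and string comparisons on
// per-query code paths.
var (
	dbType      string
	connPool    bool
	simpleParse bool
)

func main() {
	dbMode := flag.String("db-mode", "postgresql", "proxy work mode [mysql|postgresql]")
	pool := flag.Bool("conn-pool", true, "use the backend connection pool")
	simple := flag.Bool("simple-parse", true, "route reads with a lexer instead of a full AST parse")
	flag.Parse()

	dbType, connPool, simpleParse = *dbMode, *pool, *simple
	fmt.Printf("mode=%s pool=%v simple-parse=%v\n", dbType, connPool, simpleParse)
}
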
// Structure corresponding to the entire config file diff --git a/mysql/packetio.go b/mysql/packetio.go index ac76039..7f0d820 100644 --- a/mysql/packetio.go +++ b/mysql/packetio.go @@ -23,7 +23,7 @@ import ( ) const ( - defaultReaderSize = 8 * 1024 + defaultReaderSize = 1<<14 - 1 ) type PacketIO struct { diff --git a/proxy/server/conn_pgsql.go b/proxy/server/conn_pgsql.go index 63c403c..93ede1a 100644 --- a/proxy/server/conn_pgsql.go +++ b/proxy/server/conn_pgsql.go @@ -32,7 +32,7 @@ import ( "time" "unsafe" - timecost "github.com/dablelv/go-huge-util" + //timecost "github.com/dablelv/go-huge-util" "github.com/jackc/pgproto3/v2" "github.com/jackc/pgx/v4" @@ -126,7 +126,7 @@ func (c *ClientConn) RunPg(ctx context.Context) { }() // flag for just use master node, just use for some special cases // treat it simple and crude, set in transaction - if os.Getenv(config.SingleSession) == "true" { + if config.SingleSession { c.status &= ^mysql.SERVER_STATUS_AUTOCOMMIT c.status |= mysql.SERVER_STATUS_IN_TRANS c.alwaysCurNode = true @@ -148,7 +148,7 @@ func (c *ClientConn) RunPg(ctx context.Context) { return } c.configVer = c.proxy.configVer - if golog.GetLevel() <= 1 { + if golog.GetLevel() <= golog.LevelDebug { golog.Debug(moduleName, "RunPg", fmt.Sprintf("config reload ok, ver: %d", c.configVer), c.connectionId) } @@ -187,7 +187,7 @@ func (cc *ClientConn) dispatchPg(ctx context.Context, header []byte, data []byte cc.proxy.counter.IncrClientQPS() cmd := header[0] var err error - golog.Trace(moduleName, "dispatchPg", "cmd str:"+string(cmd), cc.connectionId) + //golog.Trace(moduleName, "dispatchPg", "cmd str:"+string(cmd), cc.connectionId) switch cmd { case 'Q': /* simple query */ simpleQuery := pgproto3.Query{} @@ -241,7 +241,7 @@ func (cc *ClientConn) dispatchPg(ctx context.Context, header []byte, data []byte cc.parseFlag = true cc.backendConn, err = cc.preHandlePg(sql, ctx) if err == nil { - if golog.GetLevel() <= 1 { + if golog.GetLevel() <= golog.LevelDebug { golog.Debug(moduleName, "parse", fmt.Sprintf("exec sql [%s] by node [%s]", sql, cc.backendConn.GetAddr()), cc.connectionId, "dbname", cc.db) } @@ -309,14 +309,6 @@ func (cc *ClientConn) dispatchPg(ctx context.Context, header []byte, data []byte } func (cc *ClientConn) handleParsePrepare(ctx context.Context) error { - if os.Getenv(config.TimeCostFlag) == "true" { - c := timecost.TimeCost() - defer func() { - if golog.GetLevel() <= 3 { - golog.Warn("TIMECOST", "timecost", fmt.Sprintf("handleParsePrepare time cost is %v", c()), cc.connectionId) - } - }() - } var parseData []byte cc.Parse.Range(func(key, value interface{}) bool { parse := (value).(pgproto3.Parse) @@ -326,13 +318,13 @@ func (cc *ClientConn) handleParsePrepare(ctx context.Context) error { if parseData == nil || len(parseData) == 0 { return nil } - if golog.GetLevel() <= 1 { + if golog.GetLevel() <= golog.LevelDebug { golog.Debug(moduleName, "handleParsePrepare", fmt.Sprintf("write cached parse data is: %s", string(parseData)), cc.connectionId) } err := cc.backendConn.Conn.WritePgPacket(parseData) if err != nil { - if golog.GetLevel() <= 4 { + if golog.GetLevel() <= golog.LevelError { golog.Error(moduleName, "handleParsePrepare", fmt.Sprintf("write parse to connection err: %s", err.Error()), cc.connectionId) } } @@ -342,26 +334,18 @@ func (cc *ClientConn) handleParsePrepare(ctx context.Context) error { // handle simple query protocol func (cc *ClientConn) handleQueryPg(ctx context.Context, sql string, data []byte) error { - if os.Getenv(config.TimeCostFlag) == "true" { - c := 
timecost.TimeCost() - defer func() { - if golog.GetLevel() <= 3 { - golog.Warn("TIMECOST", "timecost", fmt.Sprintf("handleQueryPg time cost is %v", c()), cc.connectionId) - } - }() - } var err error if cc.backendConn == nil || cc.backendConn.Conn == nil { cc.backendConn, err = cc.preHandlePg(sql, ctx) if err != nil { - if golog.GetLevel() <= 4 { + if golog.GetLevel() <= golog.LevelError { golog.Error(moduleName, "handleQueryPg", err.Error(), 0, "sql", sql) } return err } if cc != nil && cc.backendConn != nil { - if os.Getenv(config.CancelReqFlag) != "false" { + if config.CancelReq { clientConnMap.Store(cc.connectionId, cc) } cc.handleParsePrepare(ctx) @@ -377,7 +361,7 @@ func (cc *ClientConn) handleQueryPg(ctx context.Context, sql string, data []byte return errors.ErrConnIsNil } defer cc.closeConn(cc.backendConn, false) - if golog.GetLevel() <= 1 { + if golog.GetLevel() <= golog.LevelDebug { golog.Debug(moduleName, "handleQueryPg", fmt.Sprintf("exec sql [%s] by node [%s]", sql, cc.backendConn.GetAddr()), cc.connectionId, "dbname", cc.db) } @@ -417,7 +401,7 @@ func (cc *ClientConn) handleQueryPg(ctx context.Context, sql string, data []byte sqlStr := "BEGIN;" err = cc.backendConn.Conn.WritePgPacket((&pgproto3.Query{String: sqlStr}).Encode(nil)) if err != nil { - if golog.GetLevel() <= 4 { + if golog.GetLevel() <= golog.LevelError { golog.Error(moduleName, "handleQueryPg", fmt.Sprintf("write msg err: %s", err.Error()), cc.connectionId) } return err @@ -427,7 +411,7 @@ func (cc *ClientConn) handleQueryPg(ctx context.Context, sql string, data []byte for { d, e := cc.backendConn.Conn.ReadPgPacket(header) if e != nil { - if golog.GetLevel() <= 4 { + if golog.GetLevel() <= golog.LevelError { golog.Error(moduleName, "receiveBackendMsg", fmt.Sprintf("read packet from backend err: %s", e.Error()), cc.connectionId) } return e @@ -441,7 +425,7 @@ func (cc *ClientConn) handleQueryPg(ctx context.Context, sql string, data []byte // exec current statement err = cc.backendConn.Conn.WritePgPacket(data) if err != nil { - if golog.GetLevel() <= 4 { + if golog.GetLevel() <= golog.LevelError { golog.Error(moduleName, "handleQueryPg", fmt.Sprintf("write msg err: %s", err.Error()), cc.connectionId) } return err @@ -452,7 +436,7 @@ func (cc *ClientConn) handleQueryPg(ctx context.Context, sql string, data []byte } else { err = cc.backendConn.Conn.WritePgPacket(data) if err != nil { - if golog.GetLevel() <= 4 { + if golog.GetLevel() <= golog.LevelError { golog.Error(moduleName, "handleQueryPg", fmt.Sprintf("write msg err: %s", err.Error()), cc.connectionId) } return err @@ -461,7 +445,7 @@ func (cc *ClientConn) handleQueryPg(ctx context.Context, sql string, data []byte err = cc.receiveBackendMsg(ctx) if err != nil { - if golog.GetLevel() <= 4 { + if golog.GetLevel() <= golog.LevelError { golog.Error(moduleName, "handleQueryPg", fmt.Sprintf("receiveBackend msg err: %s", err.Error()), cc.connectionId) } return err @@ -598,12 +582,12 @@ readloop: cc.dataHeader = cc.dataHeader[:5] cc.dataHeader, err = cc.backendConn.Conn.ReadPgPacket(cc.dataHeader) if err != nil { - if golog.GetLevel() <= 4 { + if golog.GetLevel() <= golog.LevelError { golog.Error(moduleName, "receiveBackendMsg", fmt.Sprintf("read packet from backend err: %s", err.Error()), cc.connectionId) } return err } - if golog.GetLevel() <= 0 { + if golog.GetLevel() <= golog.LevelTrace { golog.Trace(moduleName, "receiveBackendMsg", fmt.Sprintf("recv packet from backend msg type: %s", string(cc.dataHeader[0])), cc.connectionId) } // deal with copy msg @@ -629,7 
+613,7 @@ readloop: lsn := pgproto3.LsnResponse{} lsn.Decode(cc.dataHeader[5:]) addr := cc.backendConn.ConnPg.PgConn().Conn().RemoteAddr().String() - if golog.GetLevel() <= 1 { + if golog.GetLevel() <= golog.LevelDebug { golog.Debug("pg conn", "receiveBackendMsg", fmt.Sprintf("addr: %s, lsn: %d", addr, lsn.LSN), cc.connectionId) } // set LSN to node @@ -678,7 +662,7 @@ readloop: cc.dataRecv = BytesCombine(cc.dataRecv, cc.dataHeader) err = cc.WriteData(cc.dataRecv) if err != nil { - if golog.GetLevel() <= 4 { + if golog.GetLevel() <= golog.LevelError { golog.Error(moduleName, "receiveBackendMsg", fmt.Sprintf("write data to backend err: %v", err), cc.connectionId) } @@ -687,8 +671,8 @@ readloop: break readloop } if cc.dataHeader[0] == 'E' { - if golog.GetLevel() <= 4 { - golog.Error(moduleName, "receiveBackendMsg", fmt.Sprintf("read err packet from backend: %s", string(cc.dataHeader)), cc.connectionId) + if golog.GetLevel() <= golog.LevelWarn { + golog.Warn(moduleName, "receiveBackendMsg", fmt.Sprintf("read err packet from backend: %s", string(cc.dataHeader)), cc.connectionId) } } @@ -927,7 +911,7 @@ func (cc *ClientConn) handleStartupMessage(ctx context.Context, startupMessage * // connection env 'HE3PROXY' need support by db engine // if you use original postgres, please close this flag or will encounter an error like: // FATAL: unrecognized configuration parameter "he3proxy" (SQLSTATE 42704) - if os.Getenv(config.ConnFlag) != "false" { + if config.He3Proxy { os.Setenv("HE3PROXY", "true") } if cc.db == "" { @@ -949,7 +933,7 @@ func (cc *ClientConn) handleStartupMessage(ctx context.Context, startupMessage * ^ HINT: Perhaps you meant to reference the column "d.datname" or the column "d.datacl". */ - serverVersion := os.Getenv(config.ServerVersionEnv) + serverVersion := config.ServerVersion if serverVersion == "" { serverVersion = "9.0.0" } @@ -984,7 +968,7 @@ func (cc *ClientConn) handleStartupMessage(ctx context.Context, startupMessage * if err := cc.writeReadyForQuery(ctx, 'I'); err != nil { return err } - if golog.GetLevel() <= 2 { + if golog.GetLevel() <= golog.LevelInfo { golog.Info(moduleName, "handleStartupMessage", fmt.Sprintf("%s connection succeeded", cc.c.RemoteAddr().String()), cc.connectionId) } @@ -1154,9 +1138,9 @@ func (cc *ClientConn) readPacketPg() ([]byte, []byte, error) { return nil, nil, err } msgLen := int(binary.BigEndian.Uint32(header[1:]) - 4) - if msgLen > cap(cc.dataRecv){ + if msgLen > cap(cc.dataRecv) { cc.dataRecv = make([]byte, msgLen) - }else { + } else { cc.dataRecv = cc.dataRecv[:msgLen] } if _, err := io.ReadAtLeast(cc.pkg.Rb, cc.dataRecv, msgLen); err != nil { @@ -1203,7 +1187,7 @@ func (cc *ClientConn) writeSSLRequest(ctx context.Context, pgRequestSSL byte) er func (cc *ClientConn) WriteData(data []byte) error { if n, err := cc.pkg.Wb.Write(data); err != nil { - if golog.GetLevel() <= 4 { + if golog.GetLevel() <= golog.LevelError { golog.Error(moduleName, "WriteData", fmt.Sprintf("write data to backend err: %v", err), cc.connectionId) } return mysql.ErrBadConn diff --git a/proxy/server/conn_preshard.go b/proxy/server/conn_preshard.go index 055b081..99e2345 100644 --- a/proxy/server/conn_preshard.go +++ b/proxy/server/conn_preshard.go @@ -17,13 +17,11 @@ package server import ( "context" "fmt" - "os" "strings" "github.com/auxten/postgresql-parser/pkg/sql/parser" "github.com/auxten/postgresql-parser/pkg/sql/sem/tree" "github.com/auxten/postgresql-parser/pkg/walk" - timecost "github.com/dablelv/go-huge-util" "gitee.com/he3db/he3proxy/backend" 
"gitee.com/he3db/he3proxy/config" @@ -116,7 +114,7 @@ func (c *ClientConn) preHandleShard(sql string) (bool, error) { if len(rs) == 0 { msg := "result is empty" - if golog.GetLevel() <= 4 { + if golog.GetLevel() <= golog.LevelError { golog.Error("conn_preshard", "handleUnsupport", msg, 0, "sql", sql) } @@ -142,14 +140,14 @@ func (c *ClientConn) preHandleShard(sql string) (bool, error) { //preprocessing sql before parse sql func (c *ClientConn) preHandlePg(sql string, ctx context.Context) (*backend.BackendConn, error) { - if os.Getenv(config.TimeCostFlag) == "true" { - tc := timecost.TimeCost() - defer func() { - if golog.GetLevel() <= 3 { - golog.Warn("TIMECOST", "timecost", fmt.Sprintf("preHandlePg time cost is %v", tc()),c.connectionId) - } - }() - } + //if os.Getenv(config.TimeCostFlag) == "true" { + // tc := timecost.TimeCost() + // defer func() { + // if golog.GetLevel() <= 3 { + // golog.Warn("TIMECOST", "timecost", fmt.Sprintf("preHandlePg time cost is %v", tc()),c.connectionId) + // } + // }() + //} var err error var executeDB *ExecuteDB @@ -242,15 +240,7 @@ func (c *ClientConn) GetTransExecDB(tokens []string, sql string) (*ExecuteDB, er //if sql need shard return nil, else return the unshard db func (c *ClientConn) GetExecDB(tokens []string, sql string) (*ExecuteDB, error) { - if os.Getenv(config.TimeCostFlag) == "true" { - tc := timecost.TimeCost() - defer func() { - if golog.GetLevel() <= 3 { - golog.Warn("TIMECOST", "timecost", fmt.Sprintf("GetExecDB time cost is %v", tc()), c.connectionId) - } - }() - } - if golog.GetLevel() <= 1 { + if golog.GetLevel() <= golog.LevelDebug { golog.Debug("conn_preshard", "GetExecDB", fmt.Sprintf("tokens: [%v], sql: [%s]", tokens, sql), c.connectionId) } @@ -258,7 +248,7 @@ func (c *ClientConn) GetExecDB(tokens []string, sql string) (*ExecuteDB, error) // The logic of PG node selection is processed. // Since there is no need to divide the database and table, the original kingshard logic is removed. // And first determine whether it is read. If not, the default is master. 
- if config.Mysql == os.Getenv(config.DbTypeEnv) { + if config.Mysql == config.DbType { if 0 < tokensLen { tokenId, ok := mysql.PARSE_TOKEN_MAP[strings.ToLower(tokens[0])] if ok == true { @@ -294,7 +284,7 @@ func (c *ClientConn) GetExecDB(tokens []string, sql string) (*ExecuteDB, error) } } // simple parse, just use lex analysis and lb for cache mode will change to weight - if os.Getenv(config.SimpleParseFlag) == "true" { + if config.SimpleParse { f = false tokenId, ok := mysql.PARSE_TOKEN_MAP[strings.ToLower(tokens[0])] if ok == true { @@ -361,7 +351,7 @@ func (c *ClientConn) GetExecDB(tokens []string, sql string) (*ExecuteDB, error) } _, _ = w.Walk(stmts, nil) if err != nil { - if golog.GetLevel() <= 4 { + if golog.GetLevel() <= golog.LevelError { golog.Error("conn_preshard", "GetExecDB", fmt.Sprintf("Can't parse sql: [%s], err: [%s]", sql, err.Error()), c.connectionId) } diff --git a/proxy/server/conn_query.go b/proxy/server/conn_query.go index 1ba5e0c..90ed29f 100644 --- a/proxy/server/conn_query.go +++ b/proxy/server/conn_query.go @@ -15,6 +15,7 @@ package server import ( + "context" "fmt" "runtime" "strings" @@ -344,6 +345,7 @@ func (c *ClientConn) closeConn(conn *backend.BackendConn, rollback bool) { if c.isInTransaction() { return } + defer conn.Close() if rollback { conn.Rollback() } @@ -360,8 +362,11 @@ func (c *ClientConn) closeConn(conn *backend.BackendConn, rollback bool) { golog.Debug(moduleName, "closeConn", fmt.Sprintf("close prepare statement: [%s], len: [%d]", string(parseData), len(parseData)), c.connectionId) c.backendConn.Conn.WritePgPacket(parseData) + _, err := c.backendConn.Conn.ConnPg.Exec(context.Background(), "RESET ALL") + if err != nil { + golog.Warn("db", "Close", "write close parse msg err: "+err.Error(), '0') + } } - conn.Close() } func (c *ClientConn) closeShardConns(conns map[string]*backend.BackendConn, rollback bool) { diff --git a/proxy/server/server.go b/proxy/server/server.go index 63cf6c7..5a78530 100644 --- a/proxy/server/server.go +++ b/proxy/server/server.go @@ -173,7 +173,7 @@ func parseNode(cfg config.NodeConfig) (*backend.Node, error) { go n.CheckNode() // go routine for callback connection cache - if os.Getenv(config.DbTypeEnv) != config.Mysql { + if config.DbType != config.Mysql { go n.CheckConnsCache() } @@ -334,7 +334,7 @@ func (s *Server) newClientConn(co net.Conn) *ClientConn { // meaning that data is sent as soon as possible after a Write. //I set this option false. 
tcpConn.SetNoDelay(false) - //tcpConn.SetKeepAlive(true) + tcpConn.SetKeepAlive(true) c.c = tcpConn func() { @@ -420,7 +420,7 @@ func (s *Server) onConn(c net.Conn, dbType string) { ctx := context.Background() if err := conn.handshake(ctx); err != nil { - if golog.GetLevel() <= 4 { + if golog.GetLevel() <= golog.LevelError { golog.Error("server", "onConn", err.Error(), 0) } conn.writePgErr(ctx, "08006", err.Error()) @@ -436,8 +436,8 @@ func (s *Server) onConn(c net.Conn, dbType string) { // init all db-node connection pool masterNode := s.nodes["node1"].Master - if err := masterNode.InitConnPoolPg(conn.db, conn.user); err != nil { - if golog.GetLevel() <= 4 { + if _, err := masterNode.InitConnPoolPg(conn.db, conn.user); err != nil { + if golog.GetLevel() <= golog.LevelError { golog.Error("server", "onConn", fmt.Sprintf("masterNode %s InitConnPoolPg err: %s", masterNode.Addr(), err.Error()), 0) } conn.writePgErr(ctx, "53300", err.Error()) @@ -445,8 +445,8 @@ func (s *Server) onConn(c net.Conn, dbType string) { return } for _, db := range s.nodes["node1"].Slave { - if err := db.InitConnPoolPg(conn.db, conn.user); err != nil { - if golog.GetLevel() <= 4 { + if _, err := db.InitConnPoolPg(conn.db, conn.user); err != nil { + if golog.GetLevel() <= golog.LevelError { golog.Error("server", "onConn", fmt.Sprintf("Slave %s InitConnPoolPg err: %s", db.Addr(), err.Error()), 0) } conn.writePgErr(ctx, "53300", err.Error()) @@ -858,7 +858,7 @@ func (s *Server) UpdateConfig(newCfg *config.Config) { for user := range newUserList { if _, exist := newSchemas[user]; !exist { - if golog.GetLevel() <= 4 { + if golog.GetLevel() <= golog.LevelError { golog.Error("Server", "UpdateConfig", fmt.Sprintf("user [%s] must have a schema", user), 0) } return -- Gitee From 06e80a8d965f4780de23d054078559f9449194b3 Mon Sep 17 00:00:00 2001 From: wangyao Date: Mon, 21 Nov 2022 15:41:29 +0800 Subject: [PATCH 5/7] improve: performance epollwait2 --- backend/backend_conn.go | 22 ++++++++++++---------- backend/balancer.go | 22 +++++++++++++++++----- backend/db.go | 26 +++++++++++++++++++++++--- cmd/he3proxy/main.go | 16 +++++++++++----- config/config.go | 1 + proxy/server/conn_pgsql.go | 16 ++++++++++++---- proxy/server/conn_query.go | 2 +- 7 files changed, 77 insertions(+), 28 deletions(-) diff --git a/backend/backend_conn.go b/backend/backend_conn.go index f0a4641..b7be2eb 100644 --- a/backend/backend_conn.go +++ b/backend/backend_conn.go @@ -139,14 +139,14 @@ func (c *Conn) ReConnect() error { } func (c *Conn) ReConnectPg() error { - if c.conn != nil { - c.conn.Close() + if c.ConnPg.PgConn() != nil { + c.ConnPg.Close(context.Background()) } // TODO init dbname, default postgres if c.db == "" { c.db = "postgres" } - var str = []string{"postgres://",c.user,":", c.password, "@", c.addr, "/", c.db} + var str = []string{"postgres://", c.user, ":", c.password, "@", c.addr, "/", c.db} urlExample := strings.Join(str, "") //urlExample := fmt.Sprintf("postgres://%s:%s@%s/%s", c.user, c.password, c.addr, c.db) conn, err := pgx.Connect(context.Background(), urlExample) @@ -155,17 +155,17 @@ func (c *Conn) ReConnectPg() error { return err } c.ConnPg = *conn - c.conn = c.ConnPg.PgConn().Conn().(*net.TCPConn) + tcpConn := c.ConnPg.PgConn().Conn().(*net.TCPConn) //SetNoDelay controls whether the operating system should delay packet transmission // in hopes of sending fewer packets (Nagle's algorithm). // The default is true (no delay), // meaning that data is sent as soon as possible after a Write. //I set this option false. 
- c.ConnPg.PgConn().Conn().(*net.TCPConn).SetNoDelay(false) - c.ConnPg.PgConn().Conn().(*net.TCPConn).SetKeepAlive(true) + tcpConn.SetNoDelay(false) + tcpConn.SetKeepAlive(true) - c.pkg = mysql.NewPacketIO(c.ConnPg.PgConn().Conn().(*net.TCPConn)) + c.pkg = mysql.NewPacketIO(tcpConn) c.recvData = make([]byte, 0, 16384) //if err := c.ConnPg.Ping(context.Background()); err != nil { @@ -178,11 +178,13 @@ func (c *Conn) ReConnectPg() error { } func (c *Conn) Close() error { - if c.conn != nil { + if c.conn != nil{ c.conn.Close() c.conn = nil c.salt = nil c.pkgErr = nil + } + if !c.ConnPg.IsClosed(){ c.ConnPg.Close(context.Background()) } @@ -803,9 +805,9 @@ func (c *Conn) ReadPgPacket(data []byte) ([]byte, error) { msgLen := int(binary.BigEndian.Uint32(data[1:]) - 4) //build receive byte - if msgLen > cap(c.recvData){ + if msgLen > cap(c.recvData) { c.recvData = make([]byte, msgLen, msgLen) - }else { + } else { c.recvData = c.recvData[:msgLen] } if _, err := io.ReadAtLeast(c.pkg.Rb, c.recvData, msgLen); err != nil { diff --git a/backend/balancer.go b/backend/balancer.go index 9b10251..f6c803a 100644 --- a/backend/balancer.go +++ b/backend/balancer.go @@ -83,6 +83,17 @@ func (n *Node) InitBalancer() { } } +func (n *Node) getNextSlaveByRandom() (*DB, error) { + nodeLen := len(n.Slave) + if nodeLen == 0 { + return nil, errors.ErrNoDatabase + } else if nodeLen == 1 { + return n.Slave[0], nil + } else { + return n.Slave[randNum(nodeLen)], nil + } +} + func (n *Node) getNextSlaveByWeight() (*DB, error) { n.Lock() defer n.Unlock() @@ -111,7 +122,9 @@ func (n *Node) getNextSlaveByWeight() (*DB, error) { } func (n *Node) GetNextSlave() (*DB, error) { - if n.Cfg.LoadBalanceMode == "weight" { + if n.Cfg.LoadBalanceMode == "random" { + return n.getNextSlaveByRandom() + } else if n.Cfg.LoadBalanceMode == "weight" { return n.getNextSlaveByWeight() } else if n.Cfg.LoadBalanceMode == "lsn" { // Determine whether the LSN number meets the requirements @@ -201,7 +214,7 @@ func (n *Node) GetNextSlave() (*DB, error) { if len(indexs) == 1 { return n.Slave[indexs[0]], nil } else if len(indexs) > 1 { - return n.Slave[randNum()%len(indexs)], nil + return n.Slave[indexs[randNum(len(indexs))]], nil } else { // if no cached node matched will choose a node by "weight" return n.getNextSlaveByWeight() @@ -240,8 +253,7 @@ func (n *Node) getNextSlaveByCache() []int { return indexs } -func randNum() int { - rand.Seed(time.Now().UnixNano()) +func randNum(n int) int { // rand num < 100 - return rand.Intn(100) + return rand.Intn(n) } diff --git a/backend/db.go b/backend/db.go index 0d19a16..cc814ff 100644 --- a/backend/db.go +++ b/backend/db.go @@ -459,8 +459,8 @@ func (db *DB) InitConnPoolPg(dbname string, dbuser string) (cacheConns chan *Con atomic.AddInt64(&db.pushConnCount, 1) atomic.AddInt64(&db.currConnCount, 1) } - config.CacheConnsMap.Store(db.addr + dbuser+dbname, cacheConns) - config.IdleConnsMap.Store(db.addr + dbuser+dbname, idleConns) + config.CacheConnsMap.Store(db.addr+dbuser+dbname, cacheConns) + config.IdleConnsMap.Store(db.addr+dbuser+dbname, idleConns) atomic.AddInt32(&db.cacheCountNum, 1) } return cacheConns, nil @@ -573,7 +573,7 @@ func (db *DB) GetConnFromIdle(cacheConns, idleConns chan *Conn) (*Conn, error) { select { case co = <-idleConns: atomic.AddInt64(&db.popConnCount, 1) - golog.Warn("db", "PopConnPg", "conn is nil", 0, "addr", db.addr, + golog.Warn("db", "PopConnPg", "conn is nil, get from idle conns ", 0, "addr", db.addr, "cacheConns len", len(cacheConns), "dbuser+dbname", db.user+db.db) // 
需要判断是否大于最大链接数 if db.IsExceedMaxConns() { @@ -604,6 +604,26 @@ func (db *DB) GetConnFromIdle(cacheConns, idleConns chan *Conn) (*Conn, error) { return nil, errors.ErrBadConn } } + //case <-time.After(10 * time.Millisecond): + // golog.Warn("db", "PopConnPg", "waiting conn time After 10ms", 0, "addr", db.addr, + // "cacheConns len", len(cacheConns), "dbuser+dbname", db.user+db.db) + // // 需要判断是否大于最大链接数 + // if db.IsExceedMaxConns() { + // err = errors.ErrConnIsFull + // return nil, err + // } + // //返回一个链接 + // co, err = db.newConn(db.user) + // if err != nil { + // db.closeConn(co) + // return nil, err + // } + // err = co.Ping() + // if err != nil { + // db.closeConn(co) + // return nil, errors.ErrBadConn + // } + // return co, nil } } return co, nil diff --git a/cmd/he3proxy/main.go b/cmd/he3proxy/main.go index a1084a7..3e3936c 100644 --- a/cmd/he3proxy/main.go +++ b/cmd/he3proxy/main.go @@ -70,6 +70,10 @@ var simpleParse = flag.Bool("simple-parse", true, "the flag of use simple parse var cpuNum = flag.Int("max-cpu", 0, "the GO MAX PROCS, default sys logical cpu nums") +var cpuProfile = flag.Bool("cpu-profile", true, "analysis cpu profile") + +var readonly = flag.Bool("readonly", false, "improve read performance by not release conn") + const ( sqlLogName = "sql.log" sysLogName = "sys.log" @@ -91,17 +95,18 @@ const banner string = ` ` func main() { - // 开始性能分析, 返回一个停止接口 - stopper := profile.Start(profile.CPUProfile, profile.ProfilePath(".")) - // 在main()结束时停止性能分析 - defer stopper.Stop() - fmt.Print(banner) // Get parameters output in CMD flag.Parse() // Output version information and compilation time are defined in makefile fmt.Printf("Git commit:%s\n", BuildVersion) fmt.Printf("Build time:%s\n", BuildDate) + if *cpuProfile { + // 开始性能分析, 返回一个停止接口 + stopper := profile.Start(profile.CPUProfile, profile.ProfilePath(".")) + // 在main()结束时停止性能分析 + defer stopper.Stop() + } if *cpuNum == 0 { runtime.GOMAXPROCS(runtime.NumCPU()) fmt.Printf("set runtime GOMAXPROCS: %d \n", runtime.NumCPU()) @@ -115,6 +120,7 @@ func main() { config.ConnPool = *connectionPool config.ServerVersion = *serverVersion config.SingleSession = *singleSession + config.ReadOnly = *readonly config.ConnPool = *connectionPool config.He3Proxy = *he3proxyFlag config.Metrics = *metricsFlag diff --git a/config/config.go b/config/config.go index 25b25e4..6653dbd 100644 --- a/config/config.go +++ b/config/config.go @@ -30,6 +30,7 @@ var DbType string //Used to save connection pool switch var ConnPool bool var SingleSession bool +var ReadOnly bool var He3Proxy bool //if false will close metrics collection, default true diff --git a/proxy/server/conn_pgsql.go b/proxy/server/conn_pgsql.go index 93ede1a..f132ca4 100644 --- a/proxy/server/conn_pgsql.go +++ b/proxy/server/conn_pgsql.go @@ -131,6 +131,9 @@ func (c *ClientConn) RunPg(ctx context.Context) { c.status |= mysql.SERVER_STATUS_IN_TRANS c.alwaysCurNode = true } + if config.ReadOnly { + c.alwaysCurNode = true + } for { header, msg, err := c.readPacketPg() if err != nil { @@ -443,6 +446,11 @@ func (cc *ClientConn) handleQueryPg(ctx context.Context, sql string, data []byte } } + // mock + //by := 
[]byte{84,0,0,0,26,0,1,99,0,0,0,70,57,0,3,0,0,4,18,255,255,0,0,0,124,0,0,68,0,0,0,130,0,1,0,0,0,120,50,52,51,54,55,51,53,50,55,53,57,45,57,50,56,53,56,54,51,48,54,53,57,45,48,49,48,51,52,57,56,48,53,52,57,45,54,55,54,53,56,56,56,49,55,52,52,45,51,56,52,57,49,50,56,48,50,55,53,45,49,55,49,48,52,55,52,55,51,49,56,45,53,49,52,53,56,53,50,57,54,55,55,45,57,54,53,52,54,51,53,48,54,49,48,45,49,52,56,53,53,55,51,51,51,55,51,45,55,54,56,57,57,55,50,56,54,54,57,32,67,0,0,0,13,83,69,76,69,67,84,32,49,0,90,0,0,0,5,73} + //cc.WriteData(by) + //return err + err = cc.receiveBackendMsg(ctx) if err != nil { if golog.GetLevel() <= golog.LevelError { @@ -680,10 +688,10 @@ readloop: // TODO At present, all data are returned. // In the future, we need to consider the situation of multiple data. // We need to set a threshold and return in batches - if (len(cc.dataRecv) + 1<<12) > mysql.MaxPayloadLen { - cc.WriteData(cc.dataRecv) - cc.dataRecv = cc.dataRecv[:0] - } + //if (len(cc.dataRecv) + 1<<12) > mysql.MaxPayloadLen { + // cc.WriteData(cc.dataRecv) + // cc.dataRecv = cc.dataRecv[:0] + //} } return nil } diff --git a/proxy/server/conn_query.go b/proxy/server/conn_query.go index 90ed29f..8795446 100644 --- a/proxy/server/conn_query.go +++ b/proxy/server/conn_query.go @@ -342,7 +342,7 @@ func (c *ClientConn) executeInMultiNodes(conns map[string]*backend.BackendConn, func (c *ClientConn) closeConn(conn *backend.BackendConn, rollback bool) { //TODO 事物 - if c.isInTransaction() { + if c.isInTransaction() || c.alwaysCurNode { return } defer conn.Close() -- Gitee From 22e7d07866ff3889664d91f50f81df511b001aa4 Mon Sep 17 00:00:00 2001 From: wangyao Date: Fri, 25 Nov 2022 16:25:06 +0800 Subject: [PATCH 6/7] use netpoll instead of pgx --- backend/backend_conn.go | 316 ++++++++++++++++++++++++------ backend/db.go | 48 ++--- backend/node.go | 16 +- cmd/he3proxy/main.go | 3 + config/config.go | 3 + go.mod | 3 + go.sum | 7 + proxy/server/conn.go | 5 +- proxy/server/conn_pgsql.go | 351 ++++++++++++++++++++-------------- proxy/server/conn_preshard.go | 24 +-- proxy/server/conn_query.go | 24 +-- proxy/server/server.go | 2 +- 12 files changed, 545 insertions(+), 257 deletions(-) diff --git a/backend/backend_conn.go b/backend/backend_conn.go index b7be2eb..0b8d53f 100644 --- a/backend/backend_conn.go +++ b/backend/backend_conn.go @@ -17,14 +17,19 @@ package backend import ( "bytes" "context" + "crypto/md5" "encoding/binary" + "encoding/hex" "errors" "fmt" "io" "net" + "os" "strings" "time" + "github.com/cloudwego/netpoll" + "github.com/jackc/pgproto3/v2" "github.com/jackc/pgx/v4" "gitee.com/he3db/he3proxy/config" @@ -59,7 +64,10 @@ type Conn struct { pkgErr error // pg param - ConnPg pgx.Conn + ConnPg netpoll.Connection + ConnPgx *pgx.Conn + ProcessID uint32 + SecretKey uint32 recvData []byte } @@ -73,6 +81,7 @@ func (c *Conn) Connect(addr string, user string, password string, db string) err // use utf8 c.collation = mysql.DEFAULT_COLLATION_ID c.charset = mysql.DEFAULT_CHARSET + c.ConnPgx = new(pgx.Conn) // Determine whether PG or MySQL link if config.Mysql == config.DbType { return c.ReConnect() @@ -138,54 +147,249 @@ func (c *Conn) ReConnect() error { return nil } +//func (c *Conn) ReConnectPg() error { +// if c.ConnPg.PgConn() != nil { +// c.ConnPg.Close(context.Background()) +// } +// // TODO init dbname, default postgres +// if c.db == "" { +// c.db = "postgres" +// } +// var str = []string{"postgres://", c.user, ":", c.password, "@", c.addr, "/", c.db} +// urlExample := strings.Join(str, "") +// //urlExample := 
fmt.Sprintf("postgres://%s:%s@%s/%s", c.user, c.password, c.addr, c.db) +// conn, err := pgx.Connect(context.Background(), urlExample) +// if err != nil { +// golog.Error("backend conn", "ReConnectPg", "pgx.Connect", 0, "urlExample", urlExample, "error", err.Error()) +// return err +// } +// c.ConnPg = *conn +// tcpConn := c.ConnPg.PgConn().Conn().(*net.TCPConn) +// +// //SetNoDelay controls whether the operating system should delay packet transmission +// // in hopes of sending fewer packets (Nagle's algorithm). +// // The default is true (no delay), +// // meaning that data is sent as soon as possible after a Write. +// //I set this option false. +// tcpConn.SetNoDelay(false) +// tcpConn.SetKeepAlive(true) +// +// c.pkg = mysql.NewPacketIO(tcpConn) +// +// c.recvData = make([]byte, 0, 16384) +// //if err := c.ConnPg.Ping(context.Background()); err != nil { +// // golog.Error("backend conn", "ReConnectPg", "pgx.Ping", 0, "urlExample", urlExample, "error", err.Error()) +// // c.ConnPg.Close(context.Background()) +// // return err +// //} +// +// return nil +//} + func (c *Conn) ReConnectPg() error { - if c.ConnPg.PgConn() != nil { - c.ConnPg.Close(context.Background()) - } - // TODO init dbname, default postgres - if c.db == "" { - c.db = "postgres" - } - var str = []string{"postgres://", c.user, ":", c.password, "@", c.addr, "/", c.db} - urlExample := strings.Join(str, "") - //urlExample := fmt.Sprintf("postgres://%s:%s@%s/%s", c.user, c.password, c.addr, c.db) - conn, err := pgx.Connect(context.Background(), urlExample) - if err != nil { - golog.Error("backend conn", "ReConnectPg", "pgx.Connect", 0, "urlExample", urlExample, "error", err.Error()) - return err - } - c.ConnPg = *conn - tcpConn := c.ConnPg.PgConn().Conn().(*net.TCPConn) + if config.ParseNode { + if c.ConnPgx.PgConn() != nil { + c.ConnPgx.Close(context.Background()) + } + // TODO init dbname, default postgres + if c.db == "" { + c.db = "postgres" + } + var str = []string{"postgres://", c.user, ":", c.password, "@", c.addr, "/", c.db} + urlExample := strings.Join(str, "") + //urlExample := fmt.Sprintf("postgres://%s:%s@%s/%s", c.user, c.password, c.addr, c.db) + conn, err := pgx.Connect(context.Background(), urlExample) + if err != nil { + golog.Error("backend conn", "ReConnectPg", "pgx.Connect", 0, "urlExample", urlExample, "error", err.Error()) + return err + } + c.ConnPgx = conn + tcpConn := c.ConnPgx.PgConn().Conn().(*net.TCPConn) - //SetNoDelay controls whether the operating system should delay packet transmission - // in hopes of sending fewer packets (Nagle's algorithm). - // The default is true (no delay), - // meaning that data is sent as soon as possible after a Write. - //I set this option false. - tcpConn.SetNoDelay(false) - tcpConn.SetKeepAlive(true) + //SetNoDelay controls whether the operating system should delay packet transmission + // in hopes of sending fewer packets (Nagle's algorithm). + // The default is true (no delay), + // meaning that data is sent as soon as possible after a Write. + //I set this option false. 
+ tcpConn.SetNoDelay(false) + tcpConn.SetKeepAlive(true) - c.pkg = mysql.NewPacketIO(tcpConn) + c.pkg = mysql.NewPacketIO(tcpConn) + + c.recvData = make([]byte, 0, 16384) + + } else { + if c.ConnPg != nil { + c.ConnPg.Close() + } + // TODO init dbname, default postgres + if c.db == "" { + c.db = "postgres" + } + + dialer := netpoll.NewDialer() + conn, err := dialer.DialConnection("tcp", c.addr, 50*time.Millisecond) + if err != nil { + golog.Error("backend conn", "ReConnectPg", "DialConnection", 0, "urlExample", c.addr, "error", err.Error()) + return err + } + + c.ConnPg = conn + + startupMsg := pgproto3.StartupMessage{ + ProtocolVersion: pgproto3.ProtocolVersionNumber, + Parameters: make(map[string]string), + } + + startupMsg.Parameters["user"] = c.user + startupMsg.Parameters["database"] = c.db + startupMsg.Parameters["datestyle"] = "ISO, MDY" + if os.Getenv("PGDATESTYLE") != "" { + startupMsg.Parameters["datestyle"] = os.Getenv("PGDATESTYLE") + } + startupMsg.Parameters["timezone"] = "PRC" + if os.Getenv("PGTZ") != "" { + startupMsg.Parameters["timezone"] = os.Getenv("PGTZ") + } + if os.Getenv("PGOPTIONS") != "" { + startupMsg.Parameters["options"] = os.Getenv("PGOPTIONS") + } + if config.He3Proxy { + startupMsg.Parameters["he3proxy"] = os.Getenv("HE3PROXY") + } + writer := c.ConnPg.Writer() + writer.WriteBinary(startupMsg.Encode(nil)) + writer.Flush() + reader := c.ConnPg.Reader() + + for { + time.Sleep(time.Microsecond * 1) + buf, _ := reader.Peek(5) + for buf == nil { + time.Sleep(5 * time.Microsecond) + buf, _ = reader.Peek(5) + } + buf, _ = reader.Next(5) + msgLen := int(binary.BigEndian.Uint32(buf[1:])) - 4 + data, _ := reader.Next(msgLen) + if buf[0] == 'R' { + msgType, er := c.findAuthenticationMessageType(data) + if er != nil { + return er + } + switch msg := msgType.(type) { + case *pgproto3.AuthenticationOk: + case *pgproto3.AuthenticationCleartextPassword: + err = c.txPasswordMessage(c.password) + if err != nil { + c.ConnPg.Close() + return errors.New("failed to write password message") + } + case *pgproto3.AuthenticationMD5Password: + msg.Decode(data) + digestedPassword := "md5" + hexMD5(hexMD5(c.password+c.user)+string(msg.Salt[:])) + err = c.txPasswordMessage(digestedPassword) + if err != nil { + c.ConnPg.Close() + return errors.New("failed to write password message") + } + } + } + if buf[0] == 'K' { + msg := pgproto3.BackendKeyData{} + msg.Decode(data) + c.ProcessID = msg.ProcessID + c.SecretKey = msg.SecretKey + } + if buf[0] == 'Z' { + break + } + } - c.recvData = make([]byte, 0, 16384) - //if err := c.ConnPg.Ping(context.Background()); err != nil { - // golog.Error("backend conn", "ReConnectPg", "pgx.Ping", 0, "urlExample", urlExample, "error", err.Error()) - // c.ConnPg.Close(context.Background()) - // return err - //} + c.pkg = mysql.NewPacketIO(conn) + + reader.Release() + + c.recvData = make([]byte, 0, 16384) + //if err := c.ConnPg.Ping(context.Background()); err != nil { + // golog.Error("backend conn", "ReConnectPg", "pgx.Ping", 0, "urlExample", urlExample, "error", err.Error()) + // c.ConnPg.Close(context.Background()) + // return err + //} + } return nil } +func (c *Conn) txPasswordMessage(password string) (err error) { + msg := &pgproto3.PasswordMessage{Password: password} + _, err = c.ConnPg.Write(msg.Encode(nil)) + return err +} + +func hexMD5(s string) string { + hash := md5.New() + io.WriteString(hash, s) + return hex.EncodeToString(hash.Sum(nil)) +} + +const ( + AuthTypeOk = 0 + AuthTypeCleartextPassword = 3 + AuthTypeMD5Password = 5 + AuthTypeSCMCreds = 
6 + AuthTypeGSS = 7 + AuthTypeGSSCont = 8 + AuthTypeSSPI = 9 + AuthTypeSASL = 10 + AuthTypeSASLContinue = 11 + AuthTypeSASLFinal = 12 +) + +func (c *Conn) findAuthenticationMessageType(src []byte) (pgproto3.BackendMessage, error) { + if len(src) < 4 { + return nil, errors.New("authentication message too short") + } + authType := binary.BigEndian.Uint32(src[:4]) + + switch authType { + case AuthTypeOk: + return new(pgproto3.AuthenticationOk), nil + case AuthTypeCleartextPassword: + return new(pgproto3.AuthenticationCleartextPassword), nil + case AuthTypeMD5Password: + return new(pgproto3.AuthenticationMD5Password), nil + case AuthTypeSCMCreds: + return nil, errors.New("AuthTypeSCMCreds is unimplemented") + case AuthTypeGSS: + return nil, errors.New("AuthTypeGSS is unimplemented") + case AuthTypeGSSCont: + return nil, errors.New("AuthTypeGSSCont is unimplemented") + case AuthTypeSSPI: + return nil, errors.New("AuthTypeSSPI is unimplemented") + case AuthTypeSASL: + return nil, errors.New("AuthTypeSASL is unimplemented") + case AuthTypeSASLContinue: + return nil, errors.New("AuthTypeSASLContinue is unimplemented") + case AuthTypeSASLFinal: + return nil, errors.New("AuthTypeSASLFinal is unimplemented") + default: + return nil, fmt.Errorf("unknown authentication type: %d", authType) + } +} + func (c *Conn) Close() error { - if c.conn != nil{ + if c.conn != nil { c.conn.Close() c.conn = nil c.salt = nil c.pkgErr = nil } - if !c.ConnPg.IsClosed(){ - c.ConnPg.Close(context.Background()) + if c.ConnPg != nil { + c.ConnPg.Close() + } + if c.ConnPgx.PgConn()!=nil && !c.ConnPgx.IsClosed() { + c.ConnPgx.Close(context.Background()) } return nil @@ -409,10 +613,17 @@ func (c *Conn) Ping() error { return err } } else { - if err := c.ConnPg.Ping(context.Background()); err != nil { - golog.Error("backend conn", "Ping", "ConnPg.Ping", 0, "error", err.Error()) - c.ConnPg.Close(context.Background()) - return err + if config.ParseNode { + if err := c.ConnPgx.Ping(context.Background()); err != nil { + golog.Error("backend conn", "Ping", "ConnPg.Ping", 0, "error", err.Error()) + c.ConnPgx.Close(context.Background()) + return err + } + } else { + if c.ConnPg == nil { + golog.Error("backend conn", "Ping", "ConnPg.Ping is not active", 0) + return errors.New("ConnPg.Ping is not active") + } } } @@ -789,32 +1000,25 @@ func (c *Conn) GetCharset() string { return c.charset } -func (c *Conn) WritePgPacket(data []byte) error { - //_, err := c.ConnPg.PgConn().Conn().Write(data) - _, err := c.pkg.Wb.Write(data) - c.pkgErr = err - return err -} - -func (c *Conn) ReadPgPacket(data []byte) ([]byte, error) { +func (c *Conn) ReadPgPacket(reader netpoll.Reader) ([]byte, error) { // read header - if _, err := io.ReadFull(c.pkg.Rb, data); err != nil { + data, err := reader.Peek(5) + if err != nil { return nil, err } - - msgLen := int(binary.BigEndian.Uint32(data[1:]) - 4) + msgLen := int(binary.BigEndian.Uint32(data[1:])) //build receive byte - if msgLen > cap(c.recvData) { - c.recvData = make([]byte, msgLen, msgLen) + if msgLen+1 > cap(c.recvData) { + c.recvData = make([]byte, msgLen+1, msgLen+1) } else { - c.recvData = c.recvData[:msgLen] + c.recvData = c.recvData[:msgLen+1] } - if _, err := io.ReadAtLeast(c.pkg.Rb, c.recvData, msgLen); err != nil { + c.recvData, err = reader.Next(msgLen + 1) + if err != nil { return nil, err } - data = append(data, c.recvData...) 
- return data, nil + return c.recvData, nil } // pg msg streaming can not through this way to get all msg diff --git a/backend/db.go b/backend/db.go index cc814ff..1752937 100644 --- a/backend/db.go +++ b/backend/db.go @@ -573,8 +573,10 @@ func (db *DB) GetConnFromIdle(cacheConns, idleConns chan *Conn) (*Conn, error) { select { case co = <-idleConns: atomic.AddInt64(&db.popConnCount, 1) - golog.Warn("db", "PopConnPg", "conn is nil, get from idle conns ", 0, "addr", db.addr, - "cacheConns len", len(cacheConns), "dbuser+dbname", db.user+db.db) + if golog.GetLevel() <= golog.LevelWarn { + golog.Warn("db", "PopConnPg", "conn is nil, get from idle conns ", 0, "addr", db.addr, + "cacheConns len", len(cacheConns), "dbuser+dbname", db.user+db.db) + } // 需要判断是否大于最大链接数 if db.IsExceedMaxConns() { err = errors.ErrConnIsFull @@ -604,26 +606,28 @@ func (db *DB) GetConnFromIdle(cacheConns, idleConns chan *Conn) (*Conn, error) { return nil, errors.ErrBadConn } } - //case <-time.After(10 * time.Millisecond): - // golog.Warn("db", "PopConnPg", "waiting conn time After 10ms", 0, "addr", db.addr, - // "cacheConns len", len(cacheConns), "dbuser+dbname", db.user+db.db) - // // 需要判断是否大于最大链接数 - // if db.IsExceedMaxConns() { - // err = errors.ErrConnIsFull - // return nil, err - // } - // //返回一个链接 - // co, err = db.newConn(db.user) - // if err != nil { - // db.closeConn(co) - // return nil, err - // } - // err = co.Ping() - // if err != nil { - // db.closeConn(co) - // return nil, errors.ErrBadConn - // } - // return co, nil + case <-time.After(1 * time.Millisecond): + if golog.GetLevel() <= golog.LevelWarn { + golog.Warn("db", "PopConnPg", "waiting conn time After 1ms", 0, "addr", db.addr, + "cacheConns len", len(cacheConns), "dbuser+dbname", db.user+db.db) + } + // 需要判断是否大于最大链接数 + if db.IsExceedMaxConns() { + err = errors.ErrConnIsFull + return nil, err + } + //返回一个链接 + co, err = db.newConn(db.user) + if err != nil { + db.closeConn(co) + return nil, err + } + err = co.Ping() + if err != nil { + db.closeConn(co) + return nil, errors.ErrBadConn + } + return co, nil } } return co, nil diff --git a/backend/node.go b/backend/node.go index f76d410..cefda66 100644 --- a/backend/node.go +++ b/backend/node.go @@ -217,6 +217,7 @@ func (n *Node) GetSlaveConnPg(dbname string, dbuser string, tablename string) (* } func (n *Node) checkMaster() { + config.ParseNode = true db := n.Master if db == nil { golog.Error("Node", "checkMaster", "Master is no alive", 0) @@ -255,9 +256,11 @@ func (n *Node) checkMaster() { "Master_down_time", int64(n.DownAfterNoAlive/time.Second)) n.DownMaster(db.addr, Down) } + config.ParseNode = false } func (n *Node) checkSlave() { + config.ParseNode = true n.RLock() if n.Slave == nil { n.RUnlock() @@ -290,6 +293,7 @@ func (n *Node) checkSlave() { n.DownSlave(slaves[i].addr, Down) } } + config.ParseNode = false } @@ -460,7 +464,7 @@ func (n *Node) DownSlave(addr string, state int32) error { // get data from db in loop func (n *Node) getLsnAndCacheMetadata(conn *Conn, masterStr string) { var err error - if conn == nil || conn.ConnPg.Ping(context.Background()) != nil { + if conn == nil || conn.ConnPgx.Ping(context.Background())!= nil { n.Master, err = n.OpenDB(masterStr) if err != nil { if golog.GetLevel() <= golog.LevelError { @@ -474,7 +478,7 @@ func (n *Node) getLsnAndCacheMetadata(conn *Conn, masterStr string) { } // init read-only node's LSN - rows, er := conn.ConnPg.Query(context.Background(), "select client_addr::text, replay_lsn from pg_stat_replication;") + rows, er := 
conn.ConnPgx.Query(context.Background(), "select client_addr::text, replay_lsn from pg_stat_replication;") if er != nil { if golog.GetLevel() <= golog.LevelError { golog.Error("node", "ParseMaster", fmt.Sprintf("Query pg_stat_replication failed: %s", er.Error()), 0) @@ -506,7 +510,7 @@ func (n *Node) getLsnAndCacheMetadata(conn *Conn, masterStr string) { } } // init master node's LSN - er = conn.ConnPg.QueryRow(context.Background(), "select pg_current_wal_lsn();").Scan(&lsn) + er = conn.ConnPgx.QueryRow(context.Background(), "select pg_current_wal_lsn();").Scan(&lsn) if er != nil { golog.Error("node", "ParseMaster", "scan pg_current_wal_lsn err !"+er.Error(), 0) } else { @@ -524,7 +528,7 @@ func (n *Node) getLsnAndCacheMetadata(conn *Conn, masterStr string) { } } // init metadata about table which node cached - rows, er = conn.ConnPg.Query(context.Background(), "select datname, relname, clientaddr from pg_hot_data;") + rows, er = conn.ConnPgx.Query(context.Background(), "select datname, relname, clientaddr from pg_hot_data;") if er != nil { if golog.GetLevel() <= golog.LevelError { golog.Error("node", "ParseMaster", fmt.Sprintf("Query pg_hot_data failed: %s", er.Error()), 0) @@ -566,7 +570,9 @@ func (n *Node) ParseMaster(masterStr string) error { return errors.ErrNoMasterDB } var err error + config.ParseNode = true n.Master, err = n.OpenDB(masterStr) + config.ParseNode = false return err } @@ -606,6 +612,7 @@ func (n *Node) ParseSlave(slaveStr string) error { n.SlaveWeights = make([]int, 0, count) //parse addr and weight + config.ParseNode = true for i := 0; i < count; i++ { addrAndWeight := strings.Split(slaveArray[i], WeightSplit) if len(addrAndWeight) == 2 { @@ -622,6 +629,7 @@ func (n *Node) ParseSlave(slaveStr string) error { } n.Slave = append(n.Slave, db) } + config.ParseNode = false n.InitBalancer() if config.Metrics && (n.Cfg.LoadBalanceMode == "metric" || n.Cfg.LoadBalanceMode == "lsn") { diff --git a/cmd/he3proxy/main.go b/cmd/he3proxy/main.go index 3e3936c..540b932 100644 --- a/cmd/he3proxy/main.go +++ b/cmd/he3proxy/main.go @@ -74,6 +74,8 @@ var cpuProfile = flag.Bool("cpu-profile", true, "analysis cpu profile") var readonly = flag.Bool("readonly", false, "improve read performance by not release conn") +var keepConn = flag.Bool("keepConn", true, "keep use current conn, and not release util conn release") + const ( sqlLogName = "sql.log" sysLogName = "sys.log" @@ -120,6 +122,7 @@ func main() { config.ConnPool = *connectionPool config.ServerVersion = *serverVersion config.SingleSession = *singleSession + config.KeepConn = *keepConn config.ReadOnly = *readonly config.ConnPool = *connectionPool config.He3Proxy = *he3proxyFlag diff --git a/config/config.go b/config/config.go index 6653dbd..cb29be0 100644 --- a/config/config.go +++ b/config/config.go @@ -31,6 +31,7 @@ var DbType string var ConnPool bool var SingleSession bool var ReadOnly bool +var KeepConn bool var He3Proxy bool //if false will close metrics collection, default true @@ -47,6 +48,8 @@ var CacheConnsMap sync.Map var IdleConnsMap sync.Map +var ParseNode bool + const ( Mysql = "mysql" PG = "postgresql" diff --git a/go.mod b/go.mod index 47e3373..40e4eb9 100644 --- a/go.mod +++ b/go.mod @@ -18,7 +18,9 @@ require ( require ( github.com/beorn7/perks v1.0.1 // indirect + github.com/bytedance/gopkg v0.0.0-20220413063733-65bf48ffb3a7 // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/cloudwego/netpoll v0.3.1 // indirect github.com/cockroachdb/apd v1.1.1-0.20181017181144-bced77f817b4 // indirect 
github.com/cockroachdb/errors v1.8.2 // indirect github.com/cockroachdb/logtags v0.0.0-20211118104740-dabe8e521a4f // indirect @@ -47,6 +49,7 @@ require ( github.com/mattn/go-isatty v0.0.14 // indirect github.com/mattn/go-runewidth v0.0.10 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect + github.com/patrickmn/go-cache v2.1.0+incompatible // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pkg/profile v1.7.0 // indirect github.com/prometheus/client_model v0.2.0 // indirect diff --git a/go.sum b/go.sum index fcfde42..6346fc1 100644 --- a/go.sum +++ b/go.sum @@ -56,6 +56,8 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bytedance/gopkg v0.0.0-20220413063733-65bf48ffb3a7 h1:PtwsQyQJGxf8iaPptPNaduEIu9BnrNms+pcRdHAxZaM= +github.com/bytedance/gopkg v0.0.0-20220413063733-65bf48ffb3a7/go.mod h1:2ZlV9BaUH4+NXIBF0aMdKKAnHTzqH+iMU4KUjAbL23Q= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/certifi/gocertifi v0.0.0-20210507211836-431795d63e8d h1:S2NE3iHSwP0XV47EEXL8mWmRdEfGscSJ+7EgePNgt0s= @@ -67,6 +69,8 @@ github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWR github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cloudwego/netpoll v0.3.1 h1:xByoORmCLIyKZ8gS+da06WDo3j+jvmhaqS2KeKejtBk= +github.com/cloudwego/netpoll v0.3.1/go.mod h1:1T2WVuQ+MQw6h6DpE45MohSvDTKdy2DlzCx2KsnPI4E= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= @@ -393,6 +397,8 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/parnurzeal/gorequest v0.2.16 h1:T/5x+/4BT+nj+3eSknXmCTnEVGSzFzPGdpqmUVVZXHQ= github.com/parnurzeal/gorequest v0.2.16/go.mod h1:3Kh2QUMJoqw3icWAecsyzkpY7UzRfDhbRdTjtNwNiUE= +github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= +github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 h1:q2e307iGHPdTGp0hoxKjt1H5pDo6utceo3dQVK3I5XQ= github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o= @@ -707,6 +713,7 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220110181412-a018aaa089fe/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k= diff --git a/proxy/server/conn.go b/proxy/server/conn.go index 3a1d371..94426da 100644 --- a/proxy/server/conn.go +++ b/proxy/server/conn.go @@ -86,12 +86,13 @@ type ClientConn struct { // save receive data from backend, will send to client dataRecv []byte - dataHeader []byte // flag for use extended query protocol parseFlag bool - Parse sync.Map //parse 通道name及parse信息 + Parse sync.Map //parse name and parse info + + isInTrxPg bool // use for pg, true means connection in transaction } const ( diff --git a/proxy/server/conn_pgsql.go b/proxy/server/conn_pgsql.go index f132ca4..1548555 100644 --- a/proxy/server/conn_pgsql.go +++ b/proxy/server/conn_pgsql.go @@ -32,6 +32,7 @@ import ( "time" "unsafe" + "github.com/cloudwego/netpoll" //timecost "github.com/dablelv/go-huge-util" "github.com/jackc/pgproto3/v2" "github.com/jackc/pgx/v4" @@ -78,14 +79,16 @@ func (cc *ClientConn) handshake(ctx context.Context) error { val, flag := clientConnMap.Load(m.(*pgproto3.CancelRequest).ProcessID) if flag { c := val.(*ClientConn) - if c != nil && c.backendConn != nil && c.backendConn.Conn != nil && c.backendConn.ConnPg.PgConn() != nil { - cancelRequest := &pgproto3.CancelRequest{ProcessID: c.backendConn.ConnPg.PgConn().PID(), - SecretKey: c.backendConn.ConnPg.PgConn().SecretKey()} - err = c.backendConn.Conn.WritePgPacket(cancelRequest.Encode(nil)) + if c != nil && c.backendConn != nil { + cancelRequest := &pgproto3.CancelRequest{ProcessID: c.backendConn.ProcessID, + SecretKey: c.backendConn.SecretKey} + wr := c.backendConn.ConnPg.Writer() + _, err = wr.WriteBinary(cancelRequest.Encode(nil)) if err != nil { golog.Error(moduleName, "CancelRequest", "write msg err: "+err.Error(), cc.connectionId) return err } + wr.Flush() } cc.Close() } @@ -114,7 +117,7 @@ func (c *ClientConn) RunPg(ctx context.Context) { } //set transaction status for backend conn if c.backendConn != nil { - if c.isInTransaction() { + if c.isInTrxPg { c.backendConn.IsInTransaction = true } else { c.backendConn.IsInTransaction = false @@ -127,11 +130,7 @@ func (c *ClientConn) RunPg(ctx context.Context) { // flag for just use master node, just use for some special cases // treat it simple and crude, set in transaction if config.SingleSession { - c.status &= ^mysql.SERVER_STATUS_AUTOCOMMIT - c.status |= mysql.SERVER_STATUS_IN_TRANS - c.alwaysCurNode = true - } - if config.ReadOnly { + c.isInTrxPg = true c.alwaysCurNode = true } for { @@ -325,12 +324,14 @@ func (cc *ClientConn) handleParsePrepare(ctx context.Context) error { golog.Debug(moduleName, "handleParsePrepare", fmt.Sprintf("write cached parse data is: %s", string(parseData)), cc.connectionId) } - err := cc.backendConn.Conn.WritePgPacket(parseData) - if err != nil { + wr := cc.backendConn.Conn.ConnPg.Writer() + n, err := wr.WriteBinary(parseData) + if n != len(parseData) || err != nil { if golog.GetLevel() <= 
golog.LevelError { golog.Error(moduleName, "handleParsePrepare", fmt.Sprintf("write parse to connection err: %s", err.Error()), cc.connectionId) } } + wr.Flush() return err } @@ -356,11 +357,13 @@ func (cc *ClientConn) handleQueryPg(ctx context.Context, sql string, data []byte } else { // change status, use for load balance in begin statement.(select first and than insert/update/delete) // we can do load balance for 'first insert', will improve machine throughput. - if cc.beginFlag == BEGIN_PRESTART_COMMIT { - cc.beginFlag = BEGIN_RELSTART + if cc.isInTrxPg || !config.ReadOnly { + if cc.beginFlag == BEGIN_PRESTART_COMMIT { + cc.beginFlag = BEGIN_RELSTART + } } } - if cc.backendConn == nil || cc.backendConn.Conn == nil { + if cc.backendConn == nil || cc.backendConn.Conn == nil || cc.backendConn.Conn.ConnPg == nil { return errors.ErrConnIsNil } defer cc.closeConn(cc.backendConn, false) @@ -371,21 +374,28 @@ func (cc *ClientConn) handleQueryPg(ctx context.Context, sql string, data []byte // deal with duplicate "begin", will return 'WARNING: there is already a transaction in progress'. if cc.beginFlag == BEGIN_PRESTART_COMMIT && "BEGIN" == strings.ToUpper(strings.ReplaceAll(sql, ";", "")) { - errRes := pgproto3.ErrorResponse{ - Severity: "WARNING", - SeverityUnlocalized: "WARNING", - Code: "25001", - Message: "there is already a transaction in progress", - File: "xact.c", - Line: 3689, - Routine: "BeginTransactionBlock", - } - var nRes pgproto3.NoticeResponse - nRes = pgproto3.NoticeResponse(errRes) - cmdComplete := &pgproto3.CommandComplete{CommandTag: stringTobyteSlice("BEGIN")} - cc.WriteData((&pgproto3.ReadyForQuery{TxStatus: 'T'}).Encode(cmdComplete.Encode((&nRes).Encode(nil)))) + //errRes := pgproto3.ErrorResponse{ + // Severity: "WARNING", + // SeverityUnlocalized: "WARNING", + // Code: "25001", + // Message: "there is already a transaction in progress", + // File: "xact.c", + // Line: 3689, + // Routine: "BeginTransactionBlock", + //} + //var nRes pgproto3.NoticeResponse + //nRes = pgproto3.NoticeResponse(errRes) + //cmdComplete := &pgproto3.CommandComplete{CommandTag: stringTobyteSlice("BEGIN")} + //cc.WriteData((&pgproto3.ReadyForQuery{TxStatus: 'T'}).Encode(cmdComplete.Encode((&nRes).Encode(nil)))) + cc.WriteData([]byte{78, 0, 0, 0, 111, 83, 87, 65, 82, 78, 73, 78, 71, 0, 86, 87, 65, 82, 78, 73, 78, 71, 0, 67, + 50, 53, 48, 48, 49, 0, 77, 116, 104, 101, 114, 101, 32, 105, 115, 32, 97, 108, 114, 101, 97, 100, 121, 32, + 97, 32, 116, 114, 97, 110, 115, 97, 99, 116, 105, 111, 110, 32, 105, 110, 32, 112, 114, 111, 103, 114, 101, + 115, 115, 0, 70, 120, 97, 99, 116, 46, 99, 0, 76, 51, 54, 56, 57, 0, 82, 66, 101, 103, 105, 110, 84, 114, + 97, 110, 115, 97, 99, 116, 105, 111, 110, 66, 108, 111, 99, 107, 0, 0, 67, 0, 0, 0, 10, 66, 69, 71, 73, 78, + 0, 90, 0, 0, 0, 5, 84}) return nil } + var reader, writer = cc.backendConn.Conn.ConnPg.Reader(), cc.backendConn.Conn.ConnPg.Writer() // handle for 'begin' statement, and when exec insert/update/delete statement add 'begin' // 1.begin with commit ('begin' to 'begin;...;commit') @@ -395,24 +405,34 @@ func (cc *ClientConn) handleQueryPg(ctx context.Context, sql string, data []byte // 4.deal with statement as in the transaction until commit if cc.beginFlag == BEGIN_PRESTART { // write msg to client directly - cmdComplete := &pgproto3.CommandComplete{CommandTag: stringTobyteSlice("BEGIN")} - cc.WriteData((&pgproto3.ReadyForQuery{TxStatus: 'T'}).Encode(cmdComplete.Encode(nil))) + //cmdComplete := &pgproto3.CommandComplete{CommandTag: 
stringTobyteSlice("BEGIN")} + //cc.WriteData((&pgproto3.ReadyForQuery{TxStatus: 'T'}).Encode(cmdComplete.Encode(nil))) + cc.WriteData([]byte{67, 0, 0, 0, 10, 66, 69, 71, 73, 78, 0, 90, 0, 0, 0, 5, 84}) cc.beginFlag = BEGIN_PRESTART_COMMIT return nil } else if cc.beginFlag == BEGIN_RELSTART { // when first exec write ops after begin, will prior exec 'begin' statement. - sqlStr := "BEGIN;" - err = cc.backendConn.Conn.WritePgPacket((&pgproto3.Query{String: sqlStr}).Encode(nil)) + //sqlStr := "BEGIN;" + //_, err = writer.WriteBinary((&pgproto3.Query{String: sqlStr}).Encode(nil)) + _, err = writer.WriteBinary([]byte{81, 0, 0, 0, 11, 66, 69, 71, 73, 78, 59, 0}) + if err != nil { + if golog.GetLevel() <= golog.LevelError { + golog.Error(moduleName, "handleQueryPg", fmt.Sprintf("write msg err: %s", err.Error()), cc.connectionId) + } + return err + } + // exec current statement + _, err = writer.WriteBinary(data) if err != nil { if golog.GetLevel() <= golog.LevelError { golog.Error(moduleName, "handleQueryPg", fmt.Sprintf("write msg err: %s", err.Error()), cc.connectionId) } return err } + writer.Flush() // consume msg from backend, but not return to client - header := make([]byte, 5) for { - d, e := cc.backendConn.Conn.ReadPgPacket(header) + d, e := cc.backendConn.Conn.ReadPgPacket(reader) if e != nil { if golog.GetLevel() <= golog.LevelError { golog.Error(moduleName, "receiveBackendMsg", fmt.Sprintf("read packet from backend err: %s", e.Error()), cc.connectionId) @@ -420,30 +440,21 @@ func (cc *ClientConn) handleQueryPg(ctx context.Context, sql string, data []byte return e } if d[0] == 'Z' { - cc.status &= ^mysql.SERVER_STATUS_AUTOCOMMIT - cc.status |= mysql.SERVER_STATUS_IN_TRANS + cc.isInTrxPg = true break } } - // exec current statement - err = cc.backendConn.Conn.WritePgPacket(data) - if err != nil { - if golog.GetLevel() <= golog.LevelError { - golog.Error(moduleName, "handleQueryPg", fmt.Sprintf("write msg err: %s", err.Error()), cc.connectionId) - } - return err - } // reset flag cc.beginFlag = BEGIN_RELSTART_BEGIN - } else { - err = cc.backendConn.Conn.WritePgPacket(data) - if err != nil { + n, er := writer.WriteBinary(data) + if er != nil || n != len(data) { if golog.GetLevel() <= golog.LevelError { golog.Error(moduleName, "handleQueryPg", fmt.Sprintf("write msg err: %s", err.Error()), cc.connectionId) } return err } + writer.Flush() } // mock @@ -451,7 +462,7 @@ func (cc *ClientConn) handleQueryPg(ctx context.Context, sql string, data []byte //cc.WriteData(by) //return err - err = cc.receiveBackendMsg(ctx) + err = cc.receiveBackendMsg(reader) if err != nil { if golog.GetLevel() <= golog.LevelError { golog.Error(moduleName, "handleQueryPg", fmt.Sprintf("receiveBackend msg err: %s", err.Error()), cc.connectionId) @@ -462,31 +473,21 @@ func (cc *ClientConn) handleQueryPg(ctx context.Context, sql string, data []byte return nil } -func stringTobyteSlice(s string) []byte { - - tmp1 := (*[2]uintptr)(unsafe.Pointer(&s)) - - tmp2 := [3]uintptr{tmp1[0], tmp1[1], tmp1[1]} - - return *(*[]byte)(unsafe.Pointer(&tmp2)) - -} - -func slicePgMsg(msg []byte) (res [][]byte) { - res = make([][]byte, 0) - if len(msg) == 0 { - return res - } - for len(msg) > 0 { - msgLen := binary.BigEndian.Uint32(msg[1:5]) - res = append(res, msg[:1+msgLen]) - msg = msg[1+msgLen:] - } - return res -} - +//func slicePgMsg(msg []byte) (res [][]byte) { +// res = make([][]byte, 0) +// if len(msg) == 0 { +// return res +// } +// for len(msg) > 0 { +// msgLen := binary.BigEndian.Uint32(msg[1:5]) +// res = append(res, 
msg[:1+msgLen]) +// msg = msg[1+msgLen:] +// } +// return res +//} +// // receive server connection msg, add deal with it - +// //func (cc *ClientConn) receiveBackendMsg(ctx context.Context) error { // msg, err := cc.backendConn.Conn.ReadPgAllPacket() // if err != nil { @@ -581,14 +582,64 @@ func slicePgMsg(msg []byte) (res [][]byte) { //} // receive server connection msg, add deal with it -func (cc *ClientConn) receiveBackendMsg(ctx context.Context) error { - var err error +func (cc *ClientConn) receiveBackendMsg(reader netpoll.Reader) error { cc.dataRecv = cc.dataRecv[:0] + if config.ReadOnly { + time.Sleep(time.Microsecond * 1) + mlen := reader.Len() + for mlen == 0 { + time.Sleep(time.Microsecond * 2) + mlen = reader.Len() + } + data, err := reader.Next(mlen) + //if err != nil { + // if golog.GetLevel() <= golog.LevelError { + // golog.Error(moduleName, "receiveBackendMsg", fmt.Sprintf("read packet from backend err: %s", err.Error()), cc.connectionId) + // } + // return err + //} + //if cc.beginFlag == BEGIN_UNSTART{ + // cc.WriteData(data) + // reader.Release() + // return nil + //} + //if data[mlen-6]=='Z'{ + // if cc.beginFlag == BEGIN_PRESTART_COMMIT { + // cc.dataRecv = BytesCombine(cc.dataRecv, data) + // cc.dataRecv[mlen-1] = 'T' + // cc.WriteData(cc.dataRecv) + // reader.Release() + // return nil + // } else if cc.beginFlag == BEGIN_RELSTART_BEGIN { + // if data[5] == 'I' && !cc.alwaysCurNode { + // cc.isInTrxPg = false + // } else { + // cc.isInTrxPg = true + // } + // cc.beginFlag = BEGIN_COMMIT + // } else { + // if data[5] == 'T' && !cc.isInTrxPg { + // cc.isInTrxPg = true + // } else if data[5] == 'I' && !cc.alwaysCurNode { + // //cc.status |= mysql.SERVER_STATUS_AUTOCOMMIT + // //cc.status &= ^mysql.SERVER_STATUS_IN_TRANS + // if cc.isInTrxPg { + // cc.isInTrxPg = false + // } + // if cc.beginFlag != BEGIN_UNSTART { + // cc.beginFlag = BEGIN_UNSTART + // } + // } + // } + //} + cc.WriteData(data) + reader.Release() + return err + } readloop: for { - cc.dataHeader = cc.dataHeader[:5] - cc.dataHeader, err = cc.backendConn.Conn.ReadPgPacket(cc.dataHeader) + data, err := cc.backendConn.Conn.ReadPgPacket(reader) if err != nil { if golog.GetLevel() <= golog.LevelError { golog.Error(moduleName, "receiveBackendMsg", fmt.Sprintf("read packet from backend err: %s", err.Error()), cc.connectionId) @@ -596,70 +647,32 @@ readloop: return err } if golog.GetLevel() <= golog.LevelTrace { - golog.Trace(moduleName, "receiveBackendMsg", fmt.Sprintf("recv packet from backend msg type: %s", string(cc.dataHeader[0])), cc.connectionId) - } - // deal with copy msg - if cc.dataHeader[0] == 'G' || cc.dataHeader[0] == 'W' { - // in transaction - cc.status &= ^mysql.SERVER_STATUS_AUTOCOMMIT - cc.status |= mysql.SERVER_STATUS_IN_TRANS - cc.dataRecv = BytesCombine(cc.dataRecv, cc.dataHeader) - cc.WriteData(cc.dataRecv) - cc.dataRecv = cc.dataRecv[:0] - break readloop - } - if cc.dataHeader[0] == 'H' { - cc.status &= ^mysql.SERVER_STATUS_AUTOCOMMIT - cc.status |= mysql.SERVER_STATUS_IN_TRANS - cc.dataRecv = BytesCombine(cc.dataRecv, cc.dataHeader) - cc.WriteData(cc.dataRecv) - cc.dataRecv = cc.dataRecv[:0] - continue - } - // add new protocol 'L' for read consistency - if cc.dataHeader[0] == 'L' { - lsn := pgproto3.LsnResponse{} - lsn.Decode(cc.dataHeader[5:]) - addr := cc.backendConn.ConnPg.PgConn().Conn().RemoteAddr().String() - if golog.GetLevel() <= golog.LevelDebug { - golog.Debug("pg conn", "receiveBackendMsg", fmt.Sprintf("addr: %s, lsn: %d", addr, lsn.LSN), cc.connectionId) - } - // set LSN to 
node - if addr != "" { - cc.nodes["node1"].NodeLsn.Store(strings.Split(addr, ":")[0], lsn.LSN) - } - // set LSN to db_table - if cc.table != "" && cc.db != "" { - cc.nodes["node1"].NodeLsn.Store(cc.db+"_"+cc.table, lsn.LSN) - } - continue + golog.Trace(moduleName, "receiveBackendMsg", fmt.Sprintf("recv packet from backend msg type: %s", string(data[0])), cc.connectionId) } + switch data[0] { // deal with msg for readForQuery. return msg - if cc.dataHeader[0] == 'Z' { - q := pgproto3.ReadyForQuery{} - q.Decode(cc.dataHeader[5:]) + case 'Z': // deal with 'begin-commit' statement, if begin-select will return 'T' for front, // means in transaction, actually backend not in transaction. Do sql with load balance if cc.beginFlag == BEGIN_PRESTART_COMMIT { - cc.dataHeader = (&pgproto3.ReadyForQuery{TxStatus: 'T'}).Encode(nil) + // (&pgproto3.ReadyForQuery{TxStatus: 'T'}).Encode(nil) + data = []byte{90, 0, 0, 0, 5, 84} } else if cc.beginFlag == BEGIN_RELSTART_BEGIN { - if q.TxStatus == 'I' && !cc.alwaysCurNode { - cc.status = mysql.SERVER_STATUS_AUTOCOMMIT + if data[5] == 'I' && !cc.alwaysCurNode { + cc.isInTrxPg = false } else { - cc.status &= ^mysql.SERVER_STATUS_AUTOCOMMIT - cc.status |= mysql.SERVER_STATUS_IN_TRANS + cc.isInTrxPg = true } cc.beginFlag = BEGIN_COMMIT } else { - if q.TxStatus == 'T' && !cc.isInTransaction() { - cc.status &= ^mysql.SERVER_STATUS_AUTOCOMMIT - cc.status |= mysql.SERVER_STATUS_IN_TRANS - } else if q.TxStatus == 'I' && !cc.alwaysCurNode { + if data[5] == 'T' && !cc.isInTrxPg { + cc.isInTrxPg = true + } else if data[5] == 'I' && !cc.alwaysCurNode { //cc.status |= mysql.SERVER_STATUS_AUTOCOMMIT //cc.status &= ^mysql.SERVER_STATUS_IN_TRANS - if cc.isInTransaction() { - cc.status = mysql.SERVER_STATUS_AUTOCOMMIT + if cc.isInTrxPg { + cc.isInTrxPg = false } if cc.beginFlag != BEGIN_UNSTART { cc.beginFlag = BEGIN_UNSTART @@ -667,7 +680,7 @@ readloop: } } - cc.dataRecv = BytesCombine(cc.dataRecv, cc.dataHeader) + cc.dataRecv = BytesCombine(cc.dataRecv, data) err = cc.WriteData(cc.dataRecv) if err != nil { if golog.GetLevel() <= golog.LevelError { @@ -677,14 +690,43 @@ readloop: } cc.dataRecv = cc.dataRecv[:0] break readloop - } - if cc.dataHeader[0] == 'E' { + case 'L': + // add new protocol 'L' for read consistency + lsn := pgproto3.LsnResponse{} + lsn.Decode(data[5:]) + addr := cc.backendConn.ConnPg.RemoteAddr().String() + if golog.GetLevel() <= golog.LevelDebug { + golog.Debug("pg conn", "receiveBackendMsg", fmt.Sprintf("addr: %s, lsn: %d", addr, lsn.LSN), cc.connectionId) + } + // set LSN to node + if addr != "" { + cc.nodes["node1"].NodeLsn.Store(strings.Split(addr, ":")[0], lsn.LSN) + } + // set LSN to db_table + if cc.table != "" && cc.db != "" { + cc.nodes["node1"].NodeLsn.Store(cc.db+"_"+cc.table, lsn.LSN) + } + continue + case 'E': if golog.GetLevel() <= golog.LevelWarn { - golog.Warn(moduleName, "receiveBackendMsg", fmt.Sprintf("read err packet from backend: %s", string(cc.dataHeader)), cc.connectionId) + golog.Warn(moduleName, "receiveBackendMsg", fmt.Sprintf("read err packet from backend: %s", string(data)), cc.connectionId) } + case 'G', 'W': + // deal with copy msg + // in transaction + cc.isInTrxPg = true + cc.dataRecv = BytesCombine(cc.dataRecv, data) + cc.WriteData(cc.dataRecv) + cc.dataRecv = cc.dataRecv[:0] + break readloop + case 'H': + cc.isInTrxPg = true + cc.dataRecv = BytesCombine(cc.dataRecv, data) + cc.WriteData(cc.dataRecv) + cc.dataRecv = cc.dataRecv[:0] + continue } - - cc.dataRecv = BytesCombine(cc.dataRecv, cc.dataHeader) + cc.dataRecv = 
BytesCombine(cc.dataRecv, data) // TODO At present, all data are returned. // In the future, we need to consider the situation of multiple data. // We need to set a threshold and return in batches @@ -693,6 +735,7 @@ readloop: // cc.dataRecv = cc.dataRecv[:0] //} } + reader.Release() return nil } @@ -707,11 +750,14 @@ func (cc *ClientConn) handleStmtClosePg(ctx context.Context, close pgproto3.Clos cc.Parse.Delete(close.Name) } data := close.Encode(nil) - err := cc.backendConn.Conn.WritePgPacket(data) - if err != nil { - golog.Error("server", "handleStmtClosePg", "write msg err: "+err.Error(), cc.connectionId) - return err + wr := cc.backendConn.Conn.ConnPg.Writer() + n, err := wr.WriteBinary(data) + if n != len(data) || err != nil { + if golog.GetLevel() <= golog.LevelError { + golog.Error(moduleName, "handleParsePrepare", fmt.Sprintf("write parse to connection err: %s", err.Error()), cc.connectionId) + } } + wr.Flush() return cc.writeCloseComplete() } @@ -741,13 +787,16 @@ func (cc *ClientConn) handleStmtSyncPg(ctx context.Context) error { } //return nil } - err := cc.backendConn.Conn.WritePgPacket(cc.dataSend) - if err != nil { - golog.Error(moduleName, "handleStmtSyncPg", "write msg err: "+err.Error(), cc.connectionId) - return err + wr := cc.backendConn.Conn.ConnPg.Writer() + n, err := wr.WriteBinary(cc.dataSend) + if n != len(cc.dataSend) || err != nil { + if golog.GetLevel() <= golog.LevelError { + golog.Error(moduleName, "handleParsePrepare", fmt.Sprintf("write parse to connection err: %s", err.Error()), cc.connectionId) + } } + wr.Flush() - err = cc.receiveBackendMsg(ctx) + err = cc.receiveBackendMsg(cc.backendConn.Conn.ConnPg.Reader()) if err != nil { golog.Error(moduleName, "handleStmtSyncPg", "recv backend msg err: "+err.Error(), cc.connectionId) return err @@ -760,13 +809,15 @@ func (cc *ClientConn) handleCopy(ctx context.Context) error { if cc.backendConn == nil { return nil } - err := cc.backendConn.Conn.WritePgPacket(cc.dataSend) - if err != nil { + writer := cc.backendConn.Conn.ConnPg.Writer() + n, err := writer.WriteBinary(cc.dataSend) + if n != len(cc.dataSend) || err != nil { golog.Error(moduleName, "handleCopy", "write msg err: "+err.Error(), cc.connectionId) return err } + writer.Flush() - err = cc.receiveBackendMsg(ctx) + err = cc.receiveBackendMsg(cc.backendConn.Conn.ConnPg.Reader()) if err != nil { return err } @@ -1268,3 +1319,13 @@ func (cc *ClientConn) isAceessDB(hbaEntry *hba.Entry) error { } return errors.ErrFormat("user %s is not allowed to access db %s.", cc.user, cc.db) } + +func stringTobyteSlice(s string) []byte { + + tmp1 := (*[2]uintptr)(unsafe.Pointer(&s)) + + tmp2 := [3]uintptr{tmp1[0], tmp1[1], tmp1[1]} + + return *(*[]byte)(unsafe.Pointer(&tmp2)) + +} diff --git a/proxy/server/conn_preshard.go b/proxy/server/conn_preshard.go index 99e2345..778c74f 100644 --- a/proxy/server/conn_preshard.go +++ b/proxy/server/conn_preshard.go @@ -178,7 +178,7 @@ func (c *ClientConn) preHandlePg(sql string, ctx context.Context) (*backend.Back // When selecting a node. By default, IsSlave initializing ExecuteDB is false, that is, // it is the master node by default. 
If it is judged to be a select query request, it is set to true - if c.isInTransaction() { + if c.isInTrxPg { executeDB, err = c.GetTransExecDB(tokens, sql) } else { executeDB, err = c.GetExecDB(tokens, sql) @@ -303,8 +303,7 @@ func (c *ClientConn) GetExecDB(tokens []string, sql string) (*ExecuteDB, error) return c.getSelectExecDB(sql, tokens, tokensLen) case mysql.TK_ID_BEGIN: if c.parseFlag { - c.status &= ^mysql.SERVER_STATUS_AUTOCOMMIT - c.status |= mysql.SERVER_STATUS_IN_TRANS + c.isInTrxPg = true break } // replica node also can exec begin @@ -313,7 +312,7 @@ func (c *ClientConn) GetExecDB(tokens []string, sql string) (*ExecuteDB, error) } return c.getSelectExecDB(sql, tokens, tokensLen) case mysql.TK_ID_COMMIT: - if c.isInTransaction() { + if c.isInTrxPg { break } else { if c.beginFlag == BEGIN_PRESTART_COMMIT { @@ -323,13 +322,11 @@ func (c *ClientConn) GetExecDB(tokens []string, sql string) (*ExecuteDB, error) } case mysql.TK_ID_CREATE: if strings.Contains(strings.ToUpper(sql), "CREATE TEMPORARY TABLE") { - c.status &= ^mysql.SERVER_STATUS_AUTOCOMMIT - c.status |= mysql.SERVER_STATUS_IN_TRANS + c.isInTrxPg = true } break case mysql.TK_ID_SET: - c.status &= ^mysql.SERVER_STATUS_AUTOCOMMIT - c.status |= mysql.SERVER_STATUS_IN_TRANS + c.isInTrxPg = true c.alwaysCurNode = true break } @@ -372,20 +369,17 @@ func (c *ClientConn) GetExecDB(tokens []string, sql string) (*ExecuteDB, error) return c.getSelectExecDB(sql, tokens, tokensLen) case *tree.SetVar, *tree.SetTransaction, *tree.SetClusterSetting, *tree.SetSessionAuthorizationDefault, *tree.SetSessionCharacteristics, *tree.SetTracing, *tree.SetZoneConfig, *tree.CreateSequence: - c.status &= ^mysql.SERVER_STATUS_AUTOCOMMIT - c.status |= mysql.SERVER_STATUS_IN_TRANS + c.isInTrxPg = true c.alwaysCurNode = true continue case *tree.CreateTable: if strings.Contains(strings.ToUpper(n.String()), "CREATE TEMPORARY TABLE") { - c.status &= ^mysql.SERVER_STATUS_AUTOCOMMIT - c.status |= mysql.SERVER_STATUS_IN_TRANS + c.isInTrxPg = true } continue case *tree.BeginTransaction: if c.parseFlag { - c.status &= ^mysql.SERVER_STATUS_AUTOCOMMIT - c.status |= mysql.SERVER_STATUS_IN_TRANS + c.isInTrxPg = true continue } // replica node also can exec begin @@ -394,7 +388,7 @@ func (c *ClientConn) GetExecDB(tokens []string, sql string) (*ExecuteDB, error) } return c.getSelectExecDB(sql, tokens, tokensLen) case *tree.CommitTransaction: - if c.isInTransaction() { + if c.isInTrxPg { continue } else { if c.beginFlag == BEGIN_PRESTART_COMMIT { diff --git a/proxy/server/conn_query.go b/proxy/server/conn_query.go index 8795446..c3b5b82 100644 --- a/proxy/server/conn_query.go +++ b/proxy/server/conn_query.go @@ -15,7 +15,6 @@ package server import ( - "context" "fmt" "runtime" "strings" @@ -25,6 +24,7 @@ import ( "github.com/jackc/pgproto3/v2" "gitee.com/he3db/he3proxy/backend" + "gitee.com/he3db/he3proxy/config" "gitee.com/he3db/he3proxy/core/errors" "gitee.com/he3db/he3proxy/core/golog" "gitee.com/he3db/he3proxy/core/hack" @@ -167,7 +167,7 @@ func (c *ClientConn) getBackendConn(n *backend.Node, fromSlave bool) (co *backen } func (c *ClientConn) getBackendConnPg(n *backend.Node, fromSlave bool) (co *backend.BackendConn, err error) { - if !c.isInTransaction() { + if !c.isInTrxPg { if fromSlave { co, err = n.GetSlaveConnPg(c.db, c.user, c.table) //如果是链接池满的错误 则直接返回错误 不在尝试使用master节点 @@ -342,7 +342,7 @@ func (c *ClientConn) executeInMultiNodes(conns map[string]*backend.BackendConn, func (c *ClientConn) closeConn(conn *backend.BackendConn, rollback bool) { //TODO 事物 - if 
c.isInTransaction() || c.alwaysCurNode { + if c.isInTransaction() || c.alwaysCurNode || c.isInTrxPg || config.KeepConn{ return } defer conn.Close() @@ -358,15 +358,15 @@ func (c *ClientConn) closeConn(conn *backend.BackendConn, rollback bool) { parseData = cl.Encode(parseData) return true }) - if parseData != nil || len(parseData) > 0 { - golog.Debug(moduleName, "closeConn", fmt.Sprintf("close prepare statement: [%s], len: [%d]", - string(parseData), len(parseData)), c.connectionId) - c.backendConn.Conn.WritePgPacket(parseData) - _, err := c.backendConn.Conn.ConnPg.Exec(context.Background(), "RESET ALL") - if err != nil { - golog.Warn("db", "Close", "write close parse msg err: "+err.Error(), '0') - } - } + //if parseData != nil || len(parseData) > 0 { + // golog.Debug(moduleName, "closeConn", fmt.Sprintf("close prepare statement: [%s], len: [%d]", + // string(parseData), len(parseData)), c.connectionId) + // c.backendConn.Conn.WritePgPacket(parseData) + // _, err := c.backendConn.Conn.ConnPg.Exec(context.Background(), "RESET ALL") + // if err != nil { + // golog.Warn("db", "Close", "write close parse msg err: "+err.Error(), '0') + // } + //} } func (c *ClientConn) closeShardConns(conns map[string]*backend.BackendConn, rollback bool) { diff --git a/proxy/server/server.go b/proxy/server/server.go index 5a78530..a066bbf 100644 --- a/proxy/server/server.go +++ b/proxy/server/server.go @@ -371,8 +371,8 @@ func (s *Server) newClientConn(co net.Conn) *ClientConn { c.alwaysCurNode = false c.dataSend = make([]byte, 0, mysql.MaxPayloadLen) c.dataRecv = make([]byte, 0, mysql.MaxPayloadLen) - c.dataHeader = make([]byte, 5, 8192) c.parseFlag = false + c.isInTrxPg = false return c } -- Gitee From fbed3e12c7fcfe01ace54b25cd8b2daef3250ad4 Mon Sep 17 00:00:00 2001 From: wangyao Date: Mon, 28 Nov 2022 19:46:14 +0800 Subject: [PATCH 7/7] preformace client-proxy read data by zerocpoy --- backend/backend_conn.go | 4 ++ etc/he3proxy.yaml | 1 + proxy/server/buffer.go | 61 +++++++++++++++++++++++++ proxy/server/conn_pgsql.go | 93 ++++++++++++++++++++++++++------------ 4 files changed, 131 insertions(+), 28 deletions(-) create mode 100644 proxy/server/buffer.go diff --git a/backend/backend_conn.go b/backend/backend_conn.go index 0b8d53f..1edff0d 100644 --- a/backend/backend_conn.go +++ b/backend/backend_conn.go @@ -304,6 +304,10 @@ func (c *Conn) ReConnectPg() error { if buf[0] == 'Z' { break } + if buf[0] == 'E' { + golog.Error("backend_conn", "reconnnect pg", string(data), 0) + return errors.New(string(data)) + } } c.pkg = mysql.NewPacketIO(conn) diff --git a/etc/he3proxy.yaml b/etc/he3proxy.yaml index 0894ac5..bd85887 100644 --- a/etc/he3proxy.yaml +++ b/etc/he3proxy.yaml @@ -71,6 +71,7 @@ nodes: # metric: load balance by node load # lsn: load balance by speed of node log playback to meet read consistency # cache: prefer lb to data cached node, otherwise change mode to weight + # random: random for a slave node load_balance_mode: weight # [configuration about remote postgres prometheus addr & metrics] diff --git a/proxy/server/buffer.go b/proxy/server/buffer.go new file mode 100644 index 0000000..2557c64 --- /dev/null +++ b/proxy/server/buffer.go @@ -0,0 +1,61 @@ +package server + +import ( + "errors" + "io" +) + +// zero copy +type buffer struct { + reader io.Reader + buf []byte + start int + end int +} + +func newBuffer(reader io.Reader, len int) buffer { + buf := make([]byte, len) + return buffer{reader, buf, 0, 0} +} + +func (b *buffer) Len() int { + return b.end - b.start +} + +//将有用的字节前移 +func (b *buffer) 
grow() { + if b.start == 0 { + return + } + copy(b.buf, b.buf[b.start:b.end]) + b.end -= b.start + b.start = 0; +} + +//从reader里面读取数据,如果reader阻塞,会发生阻塞 +func (b *buffer) readFromReader() (int, error) { + b.grow() + n, err := b.reader.Read(b.buf[b.end:]) + if (err != nil) { + return n, err + } + b.end += n + return n, nil +} + +//返回n个字节,而不产生移位 +func (b *buffer) seek(n int) ([]byte, error) { + if b.end-b.start >= n { + buf := b.buf[b.start:b.start+n] + return buf, nil + } + return nil, errors.New("not enough") +} + +//舍弃offset个字段,读取n个字段 +func (b *buffer) read(offset, n int) ([]byte) { + b.start += offset + buf := b.buf[b.start:b.start+n] + b.start += n + return buf +} diff --git a/proxy/server/conn_pgsql.go b/proxy/server/conn_pgsql.go index 1548555..28d0025 100644 --- a/proxy/server/conn_pgsql.go +++ b/proxy/server/conn_pgsql.go @@ -133,12 +133,39 @@ func (c *ClientConn) RunPg(ctx context.Context) { c.isInTrxPg = true c.alwaysCurNode = true } + + //zero copy + //var ( + // buf = newBuffer(c.c, mysql.MaxPayloadLen) + // headBuf []byte + // contentSize int + // msg []byte + //) + for { - header, msg, err := c.readPacketPg() + // old method + msg, err := c.readPacketPg() if err != nil { return } + //zero copy + //_, err := buf.readFromReader() + //if err != nil { + // return + //} + //headBuf, err = buf.seek(5) + //if err != nil { + // break + //} + //contentSize = int(binary.BigEndian.Uint32(headBuf[1:]) +1) + //if (buf.Len() >= contentSize) { + // msg = buf.read(0, contentSize) + //}else { + // return + //} + //zero copy end + // reload configuration if c.configVer != c.proxy.configVer { err := c.reloadConfig() @@ -158,7 +185,7 @@ func (c *ClientConn) RunPg(ctx context.Context) { } // handle receive msg - if err = c.dispatchPg(ctx, header, msg); err != nil { + if err = c.dispatchPg(ctx, msg); err != nil { c.proxy.counter.IncrErrLogTotal() if err == io.EOF { continue @@ -185,18 +212,18 @@ func (c *ClientConn) RunPg(ctx context.Context) { // It also gets a token from server which is used to limit the concurrently handling clients. // The most frequently used command is ComQuery. // PostgreSQL Modified -func (cc *ClientConn) dispatchPg(ctx context.Context, header []byte, data []byte) error { +func (cc *ClientConn) dispatchPg(ctx context.Context, data []byte) error { cc.proxy.counter.IncrClientQPS() - cmd := header[0] + cmd := data[0] var err error //golog.Trace(moduleName, "dispatchPg", "cmd str:"+string(cmd), cc.connectionId) switch cmd { case 'Q': /* simple query */ simpleQuery := pgproto3.Query{} - if err := simpleQuery.Decode(data); err != nil { + if err = simpleQuery.Decode(data[5:]); err != nil { return err } - err = cc.handleQueryPg(ctx, simpleQuery.String, BytesCombine(header, data)) + err = cc.handleQueryPg(ctx, simpleQuery.String, data) return err /* extend query protocol, msg send in sequence @@ -222,11 +249,13 @@ func (cc *ClientConn) dispatchPg(ctx context.Context, header []byte, data []byte */ case 'P': /* parse */ parse := pgproto3.Parse{} - if err := parse.Decode(data); err != nil { + if err := parse.Decode(data[5:]); err != nil { return err } // save parse name, use for delete parse when retrieve to connection pool - cc.Parse.Store(parse.Name, parse) + if !config.ReadOnly{ + cc.Parse.Store(parse.Name, parse) + } sql := parse.Query if cc.backendConn == nil || cc.backendConn.Conn == nil { // parse phase will reuse connect session, if exec select first than exec insert will get an error. 
@@ -248,8 +277,11 @@ func (cc *ClientConn) dispatchPg(ctx context.Context, header []byte, data []byte
 				fmt.Sprintf("exec sql [%s] by node [%s]", sql, cc.backendConn.GetAddr()), cc.connectionId, "dbname", cc.db)
 			}
 		}
+		// save conn info to map, use for cancel request.
 		if cc != nil && cc.backendConn != nil {
-			clientConnMap.Store(cc.connectionId, cc)
+			if config.CancelReq {
+				clientConnMap.Store(cc.connectionId, cc)
+			}
 		}
 	}
 	if cc == nil || cc.backendConn == nil || err != nil {
@@ -257,15 +289,10 @@ func (cc *ClientConn) dispatchPg(ctx context.Context, header []byte, data []byte
 			return nil
 		}

-		// save conn info to map, use for cancel request.
-		clientConnMap.Store(cc.connectionId, cc)
-
 		// packaging send msg
-		cc.dataSend = BytesCombine(cc.dataSend, header)
 		cc.dataSend = BytesCombine(cc.dataSend, data)
 		return nil
 	case 'B', 'D', 'E', 'd': /* bind */ /* describe */ /* execute */ /* copy data */
-		cc.dataSend = BytesCombine(cc.dataSend, header)
 		cc.dataSend = BytesCombine(cc.dataSend, data)
 		if (len(cc.dataSend) + 1<<12) > mysql.MaxPayloadLen {
 			err = cc.handleStmtSyncPg(ctx)
@@ -276,7 +303,7 @@ func (cc *ClientConn) dispatchPg(ctx context.Context, header []byte, data []byte
 	case 'F': /* fastpath function call */
 	case 'C': /* close */
 		c := pgproto3.Close{}
-		if err := c.Decode(data); err != nil {
+		if err := c.Decode(data[5:]); err != nil {
 			return err
 		}
 		err = cc.handleStmtClosePg(ctx, c)
@@ -284,7 +311,6 @@ func (cc *ClientConn) dispatchPg(ctx context.Context, header []byte, data []byte
 	case 'H': /* flush */
 		// return cc.flush(ctx)
 	case 'S': /* sync */
-		cc.dataSend = BytesCombine(cc.dataSend, header)
 		cc.dataSend = BytesCombine(cc.dataSend, data)
 		err = cc.handleStmtSyncPg(ctx)
 		cc.dataSend = cc.dataSend[:0]
@@ -292,13 +318,11 @@ func (cc *ClientConn) dispatchPg(ctx context.Context, header []byte, data []byte
 	case 'X': /*Client Terminate*/
 		return io.EOF
 	case 'c': /* copy done */
-		cc.dataSend = BytesCombine(cc.dataSend, header)
 		cc.dataSend = BytesCombine(cc.dataSend, data)
 		err = cc.handleCopy(ctx)
 		cc.dataSend = cc.dataSend[:0]
 		return err
 	case 'f': /* copy fail */
-		cc.dataSend = BytesCombine(cc.dataSend, header)
 		cc.dataSend = BytesCombine(cc.dataSend, data)
 		err = cc.handleCopy(ctx)
 		cc.dataSend = cc.dataSend[:0]
@@ -1139,12 +1163,12 @@ func (cc *ClientConn) DoAuth(ctx context.Context, auth []byte) error {
 	// format: 'p' + len + 'password' + '0'
 	// length = len + password + 1
-	header, msg, err := cc.readPacketPg()
+	msg, err := cc.readPacketPg()
 	if err != nil {
 		return err
 	}

-	if header[0] != 'p' {
+	if msg[0] != 'p' {
 		return errors.ErrFormatStr("received is not a password packet" + string(auth[0]))
 	}
@@ -1191,21 +1215,34 @@ func getRolPwdFromDB(addr string, rolname string) (string, error) {
 }

 // readPacket Read general messages of postgresql protocol
-func (cc *ClientConn) readPacketPg() ([]byte, []byte, error) {
-	header := make([]byte, 5)
-	if _, err := io.ReadFull(cc.pkg.Rb, header); err != nil {
-		return nil, nil, err
+func (cc *ClientConn) readPacketPg() ([]byte, error) {
+	//header := make([]byte, 5)
+	//if _, err := io.ReadFull(cc.pkg.Rb, header); err != nil {
+	//	return nil, nil, err
+	//}
+	header, err := cc.pkg.Rb.Peek(5)
+	if err != nil {
+		return nil, err
 	}
-	msgLen := int(binary.BigEndian.Uint32(header[1:]) - 4)
+
+	for len(header) < 5 {
+		time.Sleep(2 * time.Microsecond)
+		header, err = cc.pkg.Rb.Peek(5)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	msgLen := int(binary.BigEndian.Uint32(header[1:]) + 1)
 	if msgLen > cap(cc.dataRecv) {
 		cc.dataRecv = make([]byte, msgLen)
 	} else {
 		cc.dataRecv = cc.dataRecv[:msgLen]
 	}
-	if _, err := io.ReadAtLeast(cc.pkg.Rb, cc.dataRecv, msgLen); err != nil {
-		return header, nil, err
+	if _, err = io.ReadAtLeast(cc.pkg.Rb, cc.dataRecv, msgLen); err != nil {
+		return nil, err
 	}
-	return header, cc.dataRecv, nil
+	return cc.dataRecv, nil
 }

 // writeAuthenticationOK
-- 
Gitee
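
For reference, the framing rule that the reworked readPacketPg and the new buffer helper both rely on is the standard PostgreSQL wire format: a 1-byte message type tag followed by a 4-byte big-endian length that counts itself but not the tag, so a complete frame is length+1 bytes, which is exactly the `binary.BigEndian.Uint32(header[1:]) + 1` computation in the hunk above. The standalone Go sketch below only illustrates that framing rule; it is not part of the patch, readMessage is a hypothetical helper name, and bufio.Reader/bytes.Buffer stand in for the proxy's real client connection and packet reader.

package main

import (
	"bufio"
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// readMessage frames one PostgreSQL wire message: a 1-byte type tag plus a
// 4-byte big-endian length that includes itself but not the tag, so the
// complete frame is length+1 bytes.
func readMessage(r *bufio.Reader) ([]byte, error) {
	header, err := r.Peek(5)
	if err != nil {
		return nil, err
	}
	total := int(binary.BigEndian.Uint32(header[1:5])) + 1
	frame := make([]byte, total)
	if _, err := io.ReadFull(r, frame); err != nil {
		return nil, err
	}
	return frame, nil
}

func main() {
	// Build a simple Query ('Q') message for "SELECT 1".
	payload := append([]byte("SELECT 1"), 0) // query text is NUL-terminated
	var msg bytes.Buffer
	msg.WriteByte('Q')
	var lenBuf [4]byte
	binary.BigEndian.PutUint32(lenBuf[:], uint32(4+len(payload)))
	msg.Write(lenBuf[:])
	msg.Write(payload)

	frame, err := readMessage(bufio.NewReader(&msg))
	if err != nil {
		panic(err)
	}
	fmt.Printf("tag=%c frame=%d bytes body=%q\n", frame[0], len(frame), frame[5:len(frame)-1])
}

With this framing, peeking 5 bytes is always enough to size the whole frame, which is why both readPacketPg and the commented-out zero-copy path in RunPg only need a Peek(5)/seek(5) before consuming one complete message.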