From d724b857eb796b9aff92e7e5efa5b97cd5783ef1 Mon Sep 17 00:00:00 2001 From: stefan miller Date: Sun, 12 May 2024 14:21:17 +0200 Subject: [PATCH] v1.8.26 --- RELEASENOTES.md | 3 +- driver/authattrs.go | 39 +++- driver/connection.go | 187 +++++++++-------- driver/connector.go | 13 +- driver/driver.go | 6 +- driver/internal/protocol/auth/scram.go | 37 ++-- .../protocol/auth/scrampbkdf2sha256.go | 38 ++-- driver/internal/protocol/auth/scramsha256.go | 37 ++-- driver/internal/protocol/auth/scrum_test.go | 5 +- driver/internal/protocol/cache/list.go | 60 ++++++ driver/internal/protocol/decode.go | 10 +- driver/internal/protocol/lob.go | 190 ++++++++---------- driver/internal/protocol/parameter.go | 8 +- driver/internal/protocol/partkind.go | 4 +- driver/internal/protocol/parts.go | 12 +- driver/internal/protocol/protocol.go | 151 ++++++++------ driver/internal/protocol/result.go | 8 +- driver/internal/protocol/rowsaffected.go | 13 +- driver/internal/protocol/x_stringer.go | 6 +- driver/lob.go | 20 +- driver/main_test.go | 26 ++- driver/metrics.go | 17 +- driver/sniffer.go | 7 +- driver/stmt.go | 2 +- driver/unicode/cesu8/cesu8.go | 3 +- go.mod | 3 +- go.sum | 12 +- 27 files changed, 535 insertions(+), 382 deletions(-) create mode 100644 driver/internal/protocol/cache/list.go diff --git a/RELEASENOTES.md b/RELEASENOTES.md index 721135d5..bbe6785d 100644 --- a/RELEASENOTES.md +++ b/RELEASENOTES.md @@ -5,9 +5,10 @@ Release Notes ### Minor revisions -#### v1.8.22 - v1.8.25 +#### v1.8.22 - v1.8.26 - updated dependencies - source code cleanups +- performance improvements #### v1.8.21 - experimental statement metadata diff --git a/driver/authattrs.go b/driver/authattrs.go index 80437563..5af5c9fc 100644 --- a/driver/authattrs.go +++ b/driver/authattrs.go @@ -1,6 +1,8 @@ package driver import ( + "os" + "path" "strings" "sync" "sync/atomic" @@ -9,12 +11,33 @@ import ( "github.com/SAP/go-hdb/driver/internal/protocol/auth" ) +type certKeyFiles struct { + certFile, keyFile string +} + +func newCertKeyFiles(certFile, keyFile string) *certKeyFiles { + return &certKeyFiles{certFile: path.Clean(certFile), keyFile: path.Clean(keyFile)} +} + +func (f *certKeyFiles) read() ([]byte, []byte, error) { + cert, err := os.ReadFile(f.certFile) + if err != nil { + return nil, nil, err + } + key, err := os.ReadFile(f.keyFile) + if err != nil { + return nil, nil, err + } + return cert, key, nil +} + // authAttrs is holding authentication relevant attributes. type authAttrs struct { hasCookie atomic.Bool version atomic.Uint64 // auth attributes version mu sync.RWMutex - _username, _password string // basic authentication + _username, _password string // basic authentication + _certKeyFiles *certKeyFiles _certKey *auth.CertKey // X509 _token string // JWT _logonname string // session cookie login does need logon name provided by JWT authentication. 
@@ -122,7 +145,19 @@ func (c *authAttrs) refresh() error { } } } - if c._refreshClientCert != nil { + switch { + case c._certKeyFiles != nil && c._refreshClientCert == nil: + if clientCert, clientKey, err := c._certKeyFiles.read(); err != nil { + if c._certKey == nil || !c._certKey.Equal(clientCert, clientKey) { + certKey, err := auth.NewCertKey(clientCert, clientKey) + if err != nil { + return err + } + c._certKey = certKey + c.version.Add(1) + } + } + case c._refreshClientCert != nil: if clientCert, clientKey, ok := c.callRefreshClientCertWithLock(c._refreshClientCert); ok { if c._certKey == nil || !c._certKey.Equal(clientCert, clientKey) { certKey, err := auth.NewCertKey(clientCert, clientKey) diff --git a/driver/connection.go b/driver/connection.go index bf46f2d1..9d989442 100644 --- a/driver/connection.go +++ b/driver/connection.go @@ -9,9 +9,11 @@ import ( "errors" "flag" "fmt" + "io" "log/slog" "net" "regexp" + "runtime/pprof" "strconv" "strings" "sync" @@ -56,6 +58,10 @@ var ( _ = p.RegisterScanType(p.DtLob, hdbreflect.TypeFor[Lob](), hdbreflect.TypeFor[NullLob]()) ) +var ( + cpuProfile = false +) + // dbConn wraps the database tcp connection. It sets timeouts and handles driver ErrBadConn behavior. type dbConn struct { metrics *metrics @@ -111,6 +117,24 @@ func (c *dbConn) Write(b []byte) (int, error) { return n, nil } +type profileDBConn struct { + *dbConn +} + +func (c *profileDBConn) Read(b []byte) (n int, err error) { + pprof.Do(context.Background(), pprof.Labels("db", "read"), func(ctx context.Context) { + n, err = c.dbConn.Read(b) + }) + return +} + +func (c *profileDBConn) Write(b []byte) (n int, err error) { + pprof.Do(context.Background(), pprof.Labels("db", "write"), func(ctx context.Context) { + n, err = c.dbConn.Write(b) + }) + return +} + // check if conn implements all required interfaces. var ( _ driver.Conn = (*conn)(nil) @@ -257,14 +281,15 @@ var ( sqlTrace atomic.Bool ) -func init() { - setTrace := func(b *atomic.Bool, s string) error { - v, err := strconv.ParseBool(s) - if err == nil { - b.Store(v) - } - return err +func setTrace(b *atomic.Bool, s string) error { + v, err := strconv.ParseBool(s) + if err == nil { + b.Store(v) } + return err +} + +func init() { flag.BoolFunc("hdb.protTrace", "enabling hdb protocol trace", func(s string) error { return setTrace(&protTrace, s) }) flag.BoolFunc("hdb.sqlTrace", "enabling hdb sql trace", func(s string) error { return setTrace(&sqlTrace, s) }) } @@ -278,6 +303,14 @@ func SetSQLTrace(on bool) { sqlTrace.Store(on) } // unique connection number. 
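Note on the profileDBConn wrapper above: pprof.Do tags the wire I/O with labels so that CPU profiles taken while profiling is enabled attribute samples to database reads and writes. A minimal standalone sketch of the same idea, with an invented labeledRW type over any io.ReadWriter (not the driver's type):

package main

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"runtime/pprof"
)

// labeledRW wraps an io.ReadWriter so that CPU samples taken while reading
// or writing carry a pprof label ("db=read" / "db=write").
type labeledRW struct {
	rw io.ReadWriter
}

func (l *labeledRW) Read(b []byte) (n int, err error) {
	pprof.Do(context.Background(), pprof.Labels("db", "read"), func(_ context.Context) {
		n, err = l.rw.Read(b)
	})
	return
}

func (l *labeledRW) Write(b []byte) (n int, err error) {
	pprof.Do(context.Background(), pprof.Labels("db", "write"), func(_ context.Context) {
		n, err = l.rw.Write(b)
	})
	return
}

func main() {
	rw := &labeledRW{rw: new(bytes.Buffer)}
	rw.Write([]byte("hello"))
	p := make([]byte, 5)
	n, _ := rw.Read(p)
	fmt.Println(string(p[:n]))
}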
var connNo atomic.Uint64 +func bufferedReaderWriter(dbConn *dbConn, bufferSize int) (*bufio.Reader, *bufio.Writer) { + if cpuProfile { + profileDBConn := &profileDBConn{dbConn: dbConn} + return bufio.NewReaderSize(profileDBConn, bufferSize), bufio.NewWriterSize(profileDBConn, bufferSize) + } + return bufio.NewReaderSize(dbConn, bufferSize), bufio.NewWriterSize(dbConn, bufferSize) +} + func newConn(ctx context.Context, host string, metrics *metrics, attrs *connAttrs) (*conn, error) { netConn, err := attrs._dialer.DialContext(ctx, host, dial.DialerOptions{Timeout: attrs._timeout, TCPKeepAlive: attrs._tcpKeepAlive}) if err != nil { @@ -292,19 +325,17 @@ func newConn(ctx context.Context, host string, metrics *metrics, attrs *connAttr } logger := attrs._logger.With(slog.Uint64("conn", connNo.Add(1))) + protTrace := protTrace.Load() dbConn := &dbConn{metrics: metrics, conn: netConn, timeout: attrs._timeout, logger: logger} - // buffer connection - rw := bufio.NewReadWriter(bufio.NewReaderSize(dbConn, attrs._bufferSize), bufio.NewWriterSize(dbConn, attrs._bufferSize)) + reader, writer := bufferedReaderWriter(dbConn, attrs._bufferSize) - protTrace := protTrace.Load() - - enc := encoding.NewEncoder(rw.Writer, attrs._cesu8Encoder) - dec := encoding.NewDecoder(rw.Reader, attrs._cesu8Decoder) + enc := encoding.NewEncoder(writer, attrs._cesu8Encoder) + dec := encoding.NewDecoder(reader, attrs._cesu8Decoder) c := &conn{attrs: attrs, metrics: metrics, dbConn: dbConn, sqlTrace: sqlTrace.Load(), logger: logger, dec: dec, sessionID: defaultSessionID} - c.pw = p.NewWriter(rw.Writer, enc, protTrace, logger, attrs._cesu8Encoder, attrs._sessionVariables) // write upstream - c.pr = p.NewDBReader(dec, c.readLob, protTrace, logger) // read downstream + c.pw = p.NewWriter(writer, enc, protTrace, logger, attrs._cesu8Encoder, attrs._sessionVariables) // write upstream + c.pr = p.NewDBReader(dec, c.readLob, protTrace, logger, attrs._lobChunkSize) // read downstream if err := c.pw.WriteProlog(ctx); err != nil { dbConn.close() @@ -705,9 +736,9 @@ func (c *conn) dbConnectInfo(ctx context.Context, databaseName string) (*DBConne return nil, err } - if err := c.pr.IterateParts(ctx, func(kind p.PartKind, attrs p.PartAttributes, read func(part p.Part)) { + if _, err := c.pr.IterateParts(ctx, 0, func(kind p.PartKind, attrs p.PartAttributes, readFn func(part p.Part)) { if kind == p.PkDBConnectInfo { - read(ci) + readFn(ci) } }); err != nil { return nil, err @@ -742,9 +773,9 @@ func (c *conn) authenticate(ctx context.Context, authHnd *p.AuthHnd, attrs *conn if err != nil { return 0, nil, err } - if err := c.pr.IterateParts(ctx, func(kind p.PartKind, attrs p.PartAttributes, read func(part p.Part)) { + if _, err := c.pr.IterateParts(ctx, 0, func(kind p.PartKind, attrs p.PartAttributes, readFn func(part p.Part)) { if kind == p.PkAuthentication { - read(initReply) + readFn(initReply) } }); err != nil { return 0, nil, err @@ -780,14 +811,14 @@ func (c *conn) authenticate(ctx context.Context, authHnd *p.AuthHnd, attrs *conn ti := new(p.TopologyInformation) - if err := c.pr.IterateParts(ctx, func(kind p.PartKind, attrs p.PartAttributes, read func(part p.Part)) { + if _, err := c.pr.IterateParts(ctx, 0, func(kind p.PartKind, attrs p.PartAttributes, readFn func(part p.Part)) { switch kind { case p.PkAuthentication: - read(finalReply) + readFn(finalReply) case p.PkConnectOptions: - read(co) + readFn(co) case p.PkTopologyInformation: - read(ti) + readFn(ti) } }); err != nil { return 0, nil, err @@ -809,16 +840,16 @@ func (c *conn) 
queryDirect(ctx context.Context, query string, commit bool) (driv meta := &p.ResultMetadata{} resSet := &p.Resultset{} - if err := c.pr.IterateParts(ctx, func(kind p.PartKind, attrs p.PartAttributes, read func(part p.Part)) { + if _, err := c.pr.IterateParts(ctx, 0, func(kind p.PartKind, attrs p.PartAttributes, readFn func(part p.Part)) { switch kind { case p.PkResultMetadata: - read(meta) + readFn(meta) qr.fields = meta.ResultFields case p.PkResultsetID: - read((*p.ResultsetID)(&qr.rsID)) + readFn((*p.ResultsetID)(&qr.rsID)) case p.PkResultset: resSet.ResultFields = qr.fields - read(resSet) + readFn(resSet) qr.fieldValues = resSet.FieldValues qr.decodeErrors = resSet.DecodeErrors qr.attrs = attrs @@ -839,14 +870,8 @@ func (c *conn) execDirect(ctx context.Context, query string, commit bool) (drive return nil, err } - rows := &p.RowsAffected{} - var numRow int64 - if err := c.pr.IterateParts(ctx, func(kind p.PartKind, attrs p.PartAttributes, read func(part p.Part)) { - if kind == p.PkRowsAffected { - read(rows) - numRow = rows.Total() - } - }); err != nil { + numRow, err := c.pr.IterateParts(ctx, 0, nil) + if err != nil { return nil, err } if c.pr.FunctionCode() == p.FcDDL { @@ -866,15 +891,15 @@ func (c *conn) prepare(ctx context.Context, query string) (*prepareResult, error resMeta := &p.ResultMetadata{} prmMeta := &p.ParameterMetadata{} - if err := c.pr.IterateParts(ctx, func(kind p.PartKind, attrs p.PartAttributes, read func(part p.Part)) { + if _, err := c.pr.IterateParts(ctx, 0, func(kind p.PartKind, attrs p.PartAttributes, readFn func(part p.Part)) { switch kind { case p.PkStatementID: - read((*p.StatementID)(&pr.stmtID)) + readFn((*p.StatementID)(&pr.stmtID)) case p.PkResultMetadata: - read(resMeta) + readFn(resMeta) pr.resultFields = resMeta.ResultFields case p.PkParameterMetadata: - read(prmMeta) + readFn(prmMeta) pr.parameterFields = prmMeta.ParameterFields } }); err != nil { @@ -903,13 +928,13 @@ func (c *conn) query(ctx context.Context, pr *prepareResult, nvargs []driver.Nam qr := &queryResult{conn: c, fields: pr.resultFields} resSet := &p.Resultset{} - if err := c.pr.IterateParts(ctx, func(kind p.PartKind, attrs p.PartAttributes, read func(part p.Part)) { + if _, err := c.pr.IterateParts(ctx, 0, func(kind p.PartKind, attrs p.PartAttributes, readFn func(part p.Part)) { switch kind { case p.PkResultsetID: - read((*p.ResultsetID)(&qr.rsID)) + readFn((*p.ResultsetID)(&qr.rsID)) case p.PkResultset: resSet.ResultFields = qr.fields - read(resSet) + readFn(resSet) qr.fieldValues = resSet.FieldValues qr.decodeErrors = resSet.DecodeErrors qr.attrs = attrs @@ -923,7 +948,7 @@ func (c *conn) query(ctx context.Context, pr *prepareResult, nvargs []driver.Nam return qr, nil } -func (c *conn) exec(ctx context.Context, pr *prepareResult, nvargs []driver.NamedValue, commit bool, ofs int) (driver.Result, error) { +func (c *conn) exec(ctx context.Context, pr *prepareResult, nvargs []driver.NamedValue, commit bool, offset int) (driver.Result, error) { inputParameters, err := p.NewInputParameters(pr.parameterFields, nvargs) if err != nil { return nil, err @@ -932,21 +957,16 @@ func (c *conn) exec(ctx context.Context, pr *prepareResult, nvargs []driver.Name return nil, err } - rows := &p.RowsAffected{Ofs: ofs} var ids []p.LocatorID lobReply := &p.WriteLobReply{} - var rowsAffected int64 - if err := c.pr.IterateParts(ctx, func(kind p.PartKind, attrs p.PartAttributes, read func(part p.Part)) { - switch kind { - case p.PkRowsAffected: - read(rows) - rowsAffected = rows.Total() - case p.PkWriteLobReply: 
- read(lobReply) + numRow, err := c.pr.IterateParts(ctx, offset, func(kind p.PartKind, attrs p.PartAttributes, readFn func(part p.Part)) { + if kind == p.PkWriteLobReply { + readFn(lobReply) ids = lobReply.IDs } - }); err != nil { + }) + if err != nil { return nil, err } fc := c.pr.FunctionCode() @@ -962,7 +982,7 @@ func (c *conn) exec(ctx context.Context, pr *prepareResult, nvargs []driver.Name write lob data only for the last record as lob streaming is only available for the last one */ startLastRec := len(nvargs) - len(pr.parameterFields) - if err := c.writeLobs(nil, ids, pr.parameterFields, nvargs[startLastRec:]); err != nil { + if err := c.writeLobs(ctx, nil, ids, pr.parameterFields, nvargs[startLastRec:]); err != nil { return nil, err } } @@ -970,30 +990,25 @@ func (c *conn) exec(ctx context.Context, pr *prepareResult, nvargs []driver.Name if fc == p.FcDDL { return driver.ResultNoRows, nil } - return driver.RowsAffected(rowsAffected), nil + return driver.RowsAffected(numRow), nil } func (c *conn) execCall(ctx context.Context, outputFields []*p.ParameterField) (*callResult, []p.LocatorID, int64, error) { cr := &callResult{conn: c, outputFields: outputFields} var qr *queryResult - rows := &p.RowsAffected{} var ids []p.LocatorID outPrms := &p.OutputParameters{} meta := &p.ResultMetadata{} resSet := &p.Resultset{} lobReply := &p.WriteLobReply{} - var numRow int64 tableRowIdx := 0 - if err := c.pr.IterateParts(ctx, func(kind p.PartKind, attrs p.PartAttributes, read func(part p.Part)) { + numRow, err := c.pr.IterateParts(ctx, 0, func(kind p.PartKind, attrs p.PartAttributes, readFn func(part p.Part)) { switch kind { - case p.PkRowsAffected: - read(rows) - numRow = rows.Total() case p.PkOutputParameters: outPrms.OutputFields = cr.outputFields - read(outPrms) + readFn(outPrms) cr.fieldValues = outPrms.FieldValues cr.decodeErrors = outPrms.DecodeErrors case p.PkResultMetadata: @@ -1008,21 +1023,22 @@ func (c *conn) execCall(ctx context.Context, outputFields []*p.ParameterField) ( cr.outputFields = append(cr.outputFields, p.NewTableRowsParameterField(tableRowIdx)) cr.fieldValues = append(cr.fieldValues, qr) tableRowIdx++ - read(meta) + readFn(meta) qr.fields = meta.ResultFields case p.PkResultset: resSet.ResultFields = qr.fields - read(resSet) + readFn(resSet) qr.fieldValues = resSet.FieldValues qr.decodeErrors = resSet.DecodeErrors qr.attrs = attrs case p.PkResultsetID: - read((*p.ResultsetID)(&qr.rsID)) + readFn((*p.ResultsetID)(&qr.rsID)) case p.PkWriteLobReply: - read(lobReply) + readFn(lobReply) ids = lobReply.IDs } - }); err != nil { + }) + if err != nil { return nil, nil, 0, err } return cr, ids, numRow, nil @@ -1037,14 +1053,15 @@ func (c *conn) fetchNext(ctx context.Context, qr *queryResult) error { resSet := &p.Resultset{ResultFields: qr.fields, FieldValues: qr.fieldValues} // reuse field values - return c.pr.IterateParts(ctx, func(kind p.PartKind, attrs p.PartAttributes, read func(part p.Part)) { + _, err := c.pr.IterateParts(ctx, 0, func(kind p.PartKind, attrs p.PartAttributes, readFn func(part p.Part)) { if kind == p.PkResultset { - read(resSet) + readFn(resSet) qr.fieldValues = resSet.FieldValues qr.decodeErrors = resSet.DecodeErrors qr.attrs = attrs } }) + return err } func (c *conn) dropStatementID(ctx context.Context, id uint64) error { @@ -1108,33 +1125,28 @@ read lob reply - seems like readLobreply returns only a result for one lob - even if more then one is requested --> read single lobs */ -func (c *conn) readLob(lobRequest *p.ReadLobRequest, lobReply *p.ReadLobReply) error 
{ +func (c *conn) readLob(request *p.ReadLobRequest, reply *p.ReadLobReply) error { defer c.addSQLTimeValue(time.Now(), sqlTimeFetchLob) - lobRequest.SetChunkSize(c.attrs._lobChunkSize) ctx := context.Background() - for { - if err := c.pw.Write(ctx, c.sessionID, p.MtWriteLob, false, lobRequest); err != nil { + var err error + for err != io.EOF { //nolint: errorlint + if err = c.pw.Write(ctx, c.sessionID, p.MtWriteLob, false, request); err != nil { return err } - if err := c.pr.IterateParts(ctx, func(kind p.PartKind, attrs p.PartAttributes, read func(part p.Part)) { + if _, err = c.pr.IterateParts(ctx, 0, func(kind p.PartKind, attrs p.PartAttributes, readFn func(part p.Part)) { if kind == p.PkReadLobReply { - read(lobReply) + readFn(reply) } }); err != nil { return err } - numChar, err := lobReply.Write() - if err != nil { + _, err = reply.Write() + if err != nil && err != io.EOF { //nolint: errorlint return err } - - if lobReply.IsLastData() { - break - } - lobRequest.AddOfs(numChar) } return nil } @@ -1146,7 +1158,7 @@ func assertEqual[T comparable](s string, a, b T) { } // writeLobs writes input lob parameters to db. -func (c *conn) writeLobs(cr *callResult, ids []p.LocatorID, inPrmFields []*p.ParameterField, nvargs []driver.NamedValue) error { +func (c *conn) writeLobs(ctx context.Context, cr *callResult, ids []p.LocatorID, inPrmFields []*p.ParameterField, nvargs []driver.NamedValue) error { assertEqual("lob streaming can only be done for one (the last) record", len(inPrmFields), len(nvargs)) descrs := make([]*p.WriteLobDescr, 0, len(ids)) @@ -1168,9 +1180,6 @@ func (c *conn) writeLobs(cr *callResult, ids []p.LocatorID, inPrmFields []*p.Par } writeLobRequest := &p.WriteLobRequest{} - - ctx := context.Background() - for len(descrs) != 0 { if len(descrs) != len(ids) { @@ -1198,15 +1207,15 @@ func (c *conn) writeLobs(cr *callResult, ids []p.LocatorID, inPrmFields []*p.Par lobReply := &p.WriteLobReply{} outPrms := &p.OutputParameters{} - if err := c.pr.IterateParts(ctx, func(kind p.PartKind, attrs p.PartAttributes, read func(part p.Part)) { + if _, err := c.pr.IterateParts(ctx, 0, func(kind p.PartKind, attrs p.PartAttributes, readFn func(part p.Part)) { switch kind { case p.PkOutputParameters: outPrms.OutputFields = cr.outputFields - read(outPrms) + readFn(outPrms) cr.fieldValues = outPrms.FieldValues cr.decodeErrors = outPrms.DecodeErrors case p.PkWriteLobReply: - read(lobReply) + readFn(lobReply) ids = lobReply.IDs } }); err != nil { diff --git a/driver/connector.go b/driver/connector.go index 61f05ed9..8926145d 100644 --- a/driver/connector.go +++ b/driver/connector.go @@ -3,8 +3,6 @@ package driver import ( "context" "database/sql/driver" - "os" - "path" "sync" "github.com/SAP/go-hdb/driver/internal/protocol/auth" @@ -62,15 +60,18 @@ func NewX509AuthConnector(host string, clientCert, clientKey []byte) (*Connector // NewX509AuthConnectorByFiles creates a connector for X509 (client certificate) authentication // based on client certificate and client key files. 
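The rewritten readLob above drops the explicit IsLastData check and drives the request/reply loop with an io.EOF sentinel instead. A rough standalone sketch of that control-flow shape only; copyChunks and its arguments are illustrative, not driver API:

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// copyChunks drains src into dst in fixed-size chunks and stops when the
// source reports io.EOF, mirroring the "loop until the reply signals last
// data" shape of the rewritten readLob.
func copyChunks(dst io.Writer, src io.Reader, chunkSize int) error {
	buf := make([]byte, chunkSize)
	var err error
	for err != io.EOF {
		var n int
		n, err = src.Read(buf)
		if n > 0 {
			if _, werr := dst.Write(buf[:n]); werr != nil {
				return werr
			}
		}
		if err != nil && err != io.EOF {
			return err
		}
	}
	return nil
}

func main() {
	var out bytes.Buffer
	if err := copyChunks(&out, strings.NewReader("some lob content"), 4); err != nil {
		panic(err)
	}
	fmt.Println(out.String())
}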
func NewX509AuthConnectorByFiles(host, clientCertFile, clientKeyFile string) (*Connector, error) { - clientCert, err := os.ReadFile(path.Clean(clientCertFile)) + c := NewConnector() + c._host = host + + c._certKeyFiles = newCertKeyFiles(clientCertFile, clientKeyFile) + clientCert, clientKey, err := c._certKeyFiles.read() if err != nil { return nil, err } - clientKey, err := os.ReadFile(path.Clean(clientKeyFile)) - if err != nil { + if c._certKey, err = auth.NewCertKey(clientCert, clientKey); err != nil { return nil, err } - return NewX509AuthConnector(host, clientCert, clientKey) + return c, nil } // NewJWTAuthConnector creates a connector for token (JWT) based authentication. diff --git a/driver/driver.go b/driver/driver.go index 3b5a3ec1..6a4d29cb 100644 --- a/driver/driver.go +++ b/driver/driver.go @@ -10,7 +10,7 @@ import ( ) // DriverVersion is the version number of the hdb driver. -const DriverVersion = "1.8.25" +const DriverVersion = "1.8.26" // DriverName is the driver name to use with sql.Open for hdb databases. const DriverName = "hdb" @@ -31,7 +31,9 @@ var defaultApplicationName, _ = os.Executable() // driver singleton instance. var stdHdbDriver *hdbDriver -func init() { +func init() { register() } + +func register() { // load stats configuration if err := loadStatsCfg(); err != nil { panic(err) // invalid configuration file diff --git a/driver/internal/protocol/auth/scram.go b/driver/internal/protocol/auth/scram.go index c6c8c3a7..680491e5 100644 --- a/driver/internal/protocol/auth/scram.go +++ b/driver/internal/protocol/auth/scram.go @@ -28,13 +28,6 @@ func checkServerChallenge(serverChallenge []byte) error { return nil } -func checkClientProof(clientProof []byte) error { - if len(clientProof) != clientProofSize { - return fmt.Errorf("invalid client proof size %d - expected %d", len(clientProof), clientProofSize) - } - return nil -} - func clientChallenge() []byte { r := make([]byte, clientChallengeSize) if _, err := rand.Read(r); err != nil { @@ -43,17 +36,25 @@ func clientChallenge() []byte { return r } -func clientProof(key, salt, serverChallenge, clientChallenge []byte) []byte { +func clientProof(key, salt, serverChallenge, clientChallenge []byte) ([]byte, error) { + if len(key) != clientProofSize { + return nil, fmt.Errorf("invalid key size %d - expected %d", len(key), clientProofSize) + } sig := _hmac(_sha256(key), salt, serverChallenge, clientChallenge) - proof := xor(sig, key) - return proof + if len(sig) != clientProofSize { + return nil, fmt.Errorf("invalid sig size %d - expected %d", len(key), clientProofSize) + } + // xor sig and key into sig (inline: no further allocation). 
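clientProof now checks the key and signature sizes and, as the comment above says, XORs the key into the HMAC output in place, so no extra proof slice is allocated. A self-contained sketch of the same computation; proofSize is a stand-in for the driver's clientProofSize:

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"fmt"
)

const proofSize = sha256.Size // 32 bytes, stand-in for the driver's clientProofSize

// proof computes HMAC-SHA256(SHA256(key), salt||serverChallenge||clientChallenge)
// and XORs the key into the signature in place, reusing sig as the proof buffer.
func proof(key, salt, serverChallenge, clientChallenge []byte) ([]byte, error) {
	if len(key) != proofSize {
		return nil, fmt.Errorf("invalid key size %d - expected %d", len(key), proofSize)
	}
	keyHash := sha256.Sum256(key)
	mac := hmac.New(sha256.New, keyHash[:])
	mac.Write(salt)
	mac.Write(serverChallenge)
	mac.Write(clientChallenge)
	sig := mac.Sum(nil)
	for i, v := range key {
		sig[i] ^= v // in-place XOR: no further allocation
	}
	return sig, nil
}

func main() {
	key := make([]byte, proofSize)
	p, err := proof(key, []byte("salt"), []byte("server"), []byte("client"))
	fmt.Println(len(p), err)
}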
+ for i, v := range key { + sig[i] ^= v + } + return sig, nil } func _sha256(p []byte) []byte { hash := sha256.New() hash.Write(p) - s := hash.Sum(nil) - return s + return hash.Sum(nil) } func _hmac(key []byte, prms ...[]byte) []byte { @@ -61,15 +62,5 @@ func _hmac(key []byte, prms ...[]byte) []byte { for _, p := range prms { hash.Write(p) } - s := hash.Sum(nil) - return s -} - -func xor(sig, key []byte) []byte { - r := make([]byte, len(sig)) - - for i, v := range sig { - r[i] = v ^ key[i] - } - return r + return hash.Sum(nil) } diff --git a/driver/internal/protocol/auth/scrampbkdf2sha256.go b/driver/internal/protocol/auth/scrampbkdf2sha256.go index 05b10c3a..ca06d3a3 100644 --- a/driver/internal/protocol/auth/scrampbkdf2sha256.go +++ b/driver/internal/protocol/auth/scrampbkdf2sha256.go @@ -3,19 +3,30 @@ package auth // Salted Challenge Response Authentication Mechanism (SCRAM) import ( + "bytes" "crypto/sha256" "fmt" + "github.com/SAP/go-hdb/driver/internal/protocol/cache" "golang.org/x/crypto/pbkdf2" ) +func scrampbkdf2sha256Key(password, salt []byte, rounds int) []byte { + return _sha256(pbkdf2.Key(password, salt, rounds, clientProofSize, sha256.New)) +} + +// use cache as key calculation is expensive. +var scrampbkdf2KeyCache = cache.NewList(3, func(k *SCRAMPBKDF2SHA256) []byte { + return scrampbkdf2sha256Key([]byte(k.password), k.salt, int(k.rounds)) +}) + // SCRAMPBKDF2SHA256 implements SCRAMPBKDF2SHA256 authentication. type SCRAMPBKDF2SHA256 struct { - username, password string - clientChallenge []byte - salt, serverChallenge []byte - clientProof, serverProof []byte - rounds uint32 + username, password string + clientChallenge []byte + salt, serverChallenge []byte + serverProof []byte + rounds uint32 } // NewSCRAMPBKDF2SHA256 creates a new authSCRAMPBKDF2SHA256 instance. @@ -27,6 +38,11 @@ func (a *SCRAMPBKDF2SHA256) String() string { return fmt.Sprintf("method type %s clientChallenge %v", a.Typ(), a.clientChallenge) } +// Compare implements cache.Compare interface. +func (a *SCRAMPBKDF2SHA256) Compare(a1 *SCRAMPBKDF2SHA256) bool { + return a.password == a1.password && bytes.Equal(a.salt, a1.salt) && a.rounds == a1.rounds +} + // Typ implements the Method interface. func (a *SCRAMPBKDF2SHA256) Typ() string { return MtSCRAMPBKDF2SHA256 } @@ -63,16 +79,16 @@ func (a *SCRAMPBKDF2SHA256) InitRepDecode(d *Decoder) error { // PrepareFinalReq implements the Method interface. 
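The new scrampbkdf2KeyCache exists because the derived key depends only on password, salt and round count, while PBKDF2 with a server-chosen round count is deliberately expensive. A standalone sketch of the derivation that gets cached; the 15000 rounds and the 32-byte length are illustrative stand-ins for the server-provided rounds and the driver's clientProofSize:

package main

import (
	"crypto/sha256"
	"fmt"
	"time"

	"golang.org/x/crypto/pbkdf2"
)

// deriveKey mirrors the shape of scrampbkdf2sha256Key:
// SHA256(PBKDF2-SHA256(password, salt, rounds, 32)).
func deriveKey(password, salt []byte, rounds int) []byte {
	k := pbkdf2.Key(password, salt, rounds, 32, sha256.New)
	sum := sha256.Sum256(k)
	return sum[:]
}

func main() {
	start := time.Now()
	key := deriveKey([]byte("secret"), []byte("salt"), 15000) // rounds value is illustrative
	fmt.Printf("%d bytes derived in %s\n", len(key), time.Since(start))
}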
func (a *SCRAMPBKDF2SHA256) PrepareFinalReq(prms *Prms) error { - key := scrampbkdf2sha256Key([]byte(a.password), a.salt, int(a.rounds)) - a.clientProof = clientProof(key, a.salt, a.serverChallenge, a.clientChallenge) - if err := checkClientProof(a.clientProof); err != nil { + key := scrampbkdf2KeyCache.Get(a) + clientProof, err := clientProof(key, a.salt, a.serverChallenge, a.clientChallenge) + if err != nil { return err } prms.AddCESU8String(a.username) prms.addString(a.Typ()) subPrms := prms.addPrms() - subPrms.addBytes(a.clientProof) + subPrms.addBytes(clientProof) return nil } @@ -93,7 +109,3 @@ func (a *SCRAMPBKDF2SHA256) FinalRepDecode(d *Decoder) error { a.serverProof = d.bytes() return nil } - -func scrampbkdf2sha256Key(password, salt []byte, rounds int) []byte { - return _sha256(pbkdf2.Key(password, salt, rounds, clientProofSize, sha256.New)) -} diff --git a/driver/internal/protocol/auth/scramsha256.go b/driver/internal/protocol/auth/scramsha256.go index 6f6f0235..31ceb089 100644 --- a/driver/internal/protocol/auth/scramsha256.go +++ b/driver/internal/protocol/auth/scramsha256.go @@ -3,15 +3,27 @@ package auth // Salted Challenge Response Authentication Mechanism (SCRAM) import ( + "bytes" "fmt" + + "github.com/SAP/go-hdb/driver/internal/protocol/cache" ) +func scramsha256Key(password, salt []byte) []byte { + return _sha256(_hmac(password, salt)) +} + +// use cache as key calculation is expensive. +var scramKeyCache = cache.NewList(3, func(k *SCRAMSHA256) []byte { + return scramsha256Key([]byte(k.password), k.salt) +}) + // SCRAMSHA256 implements SCRAMSHA256 authentication. type SCRAMSHA256 struct { - username, password string - clientChallenge []byte - salt, serverChallenge []byte - clientProof, serverProof []byte + username, password string + clientChallenge []byte + salt, serverChallenge []byte + serverProof []byte } // NewSCRAMSHA256 creates a new authSCRAMSHA256 instance. @@ -23,6 +35,11 @@ func (a *SCRAMSHA256) String() string { return fmt.Sprintf("method type %s clientChallenge %v", a.Typ(), a.clientChallenge) } +// Compare implements cache.Compare interface. +func (a *SCRAMSHA256) Compare(a1 *SCRAMSHA256) bool { + return a.password == a1.password && bytes.Equal(a.salt, a1.salt) +} + // Typ implements the Method interface. func (a *SCRAMSHA256) Typ() string { return MtSCRAMSHA256 } @@ -55,16 +72,16 @@ func (a *SCRAMSHA256) InitRepDecode(d *Decoder) error { // PrepareFinalReq implements the Method interface. 
func (a *SCRAMSHA256) PrepareFinalReq(prms *Prms) error { - key := scramsha256Key([]byte(a.password), a.salt) - a.clientProof = clientProof(key, a.salt, a.serverChallenge, a.clientChallenge) - if err := checkClientProof(a.clientProof); err != nil { + key := scramKeyCache.Get(a) + clientProof, err := clientProof(key, a.salt, a.serverChallenge, a.clientChallenge) + if err != nil { return err } prms.AddCESU8String(a.username) prms.addString(a.Typ()) subPrms := prms.addPrms() - subPrms.addBytes(a.clientProof) + subPrms.addBytes(clientProof) return nil } @@ -87,7 +104,3 @@ func (a *SCRAMSHA256) FinalRepDecode(d *Decoder) error { a.serverProof = d.bytes() return nil } - -func scramsha256Key(password, salt []byte) []byte { - return _sha256(_hmac(password, salt)) -} diff --git a/driver/internal/protocol/auth/scrum_test.go b/driver/internal/protocol/auth/scrum_test.go index 32015d1c..25f4e54f 100644 --- a/driver/internal/protocol/auth/scrum_test.go +++ b/driver/internal/protocol/auth/scrum_test.go @@ -45,7 +45,10 @@ func TestSCRUM(t *testing.T) { default: t.Fatalf("unknown authentication method %s", r.method) } - clientProof := clientProof(key, r.salt, r.serverChallenge, r.clientChallenge) + clientProof, err := clientProof(key, r.salt, r.serverChallenge, r.clientChallenge) + if err != nil { + t.Fatal(err) + } for i, v := range clientProof { if v != r.clientProof[i] { t.Fatalf("diff index % d - got %v - expected %v", i, clientProof, r.clientProof) diff --git a/driver/internal/protocol/cache/list.go b/driver/internal/protocol/cache/list.go new file mode 100644 index 00000000..fed07c95 --- /dev/null +++ b/driver/internal/protocol/cache/list.go @@ -0,0 +1,60 @@ +// Package cache provides generic cache types. +package cache + +import ( + "sync" +) + +// Comparer is an interface defining a generic compare function. +type Comparer[E any] interface { + Compare(e E) bool +} + +// List is a generic cache list. +type List[K Comparer[K], V any] struct { + maxEntries int + valueFn func(k K) V + mu sync.RWMutex + idx int + keys []K + values []V +} + +// NewList returns a new cache list. +func NewList[K Comparer[K], V any](maxEntries int, valueFn func(k K) V) *List[K, V] { + return &List[K, V]{ + maxEntries: maxEntries, + valueFn: valueFn, + keys: make([]K, 0, maxEntries), + values: make([]V, 0, maxEntries), + } +} + +func (l *List[K, V]) find(k K) (v V, ok bool) { + l.mu.RLock() + defer l.mu.RUnlock() + for i, k1 := range l.keys { + if k1.Compare(k) { + return l.values[i], true + } + } + return +} + +// Get returns the value for the given key. 
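The Get implementation that follows completes the cache: keys are matched with their own Compare method (which is what allows []byte salts in the key), values are derived lazily, and once maxEntries slots are filled the index wraps and the oldest slot is overwritten. A stripped-down, single-goroutine sketch of that behavior with an invented key type; the driver's List additionally guards the slices with an RWMutex:

package main

import (
	"bytes"
	"fmt"
)

// comparer mirrors the cache.Comparer interface from the patch.
type comparer[E any] interface{ Compare(e E) bool }

// miniList keeps at most max entries, overwrites the oldest slot round-robin
// and derives missing values lazily via valueFn.
type miniList[K comparer[K], V any] struct {
	max     int
	valueFn func(K) V
	idx     int
	keys    []K
	values  []V
}

func (l *miniList[K, V]) get(k K) V {
	for i, k1 := range l.keys {
		if k1.Compare(k) {
			return l.values[i]
		}
	}
	v := l.valueFn(k)
	l.idx %= l.max
	if l.idx >= len(l.keys) {
		l.keys = append(l.keys, k)
		l.values = append(l.values, v)
	} else {
		l.keys[l.idx], l.values[l.idx] = k, v
	}
	l.idx++
	return v
}

// key uses content-based equality; the []byte salt rules out a plain map key.
type key struct {
	password string
	salt     []byte
}

func (k key) Compare(o key) bool { return k.password == o.password && bytes.Equal(k.salt, o.salt) }

func main() {
	derivations := 0
	l := &miniList[key, string]{max: 3, valueFn: func(k key) string { derivations++; return k.password }}
	l.get(key{"p1", []byte("s")})
	l.get(key{"p1", []byte("s")}) // cache hit: no second derivation
	fmt.Println(derivations)      // 1
}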
+func (l *List[K, V]) Get(k K) V { + if v, ok := l.find(k); ok { + return v + } + l.mu.Lock() + defer l.mu.Unlock() + v := l.valueFn(k) + l.idx %= l.maxEntries + if l.idx >= len(l.keys) { + l.keys = l.keys[:l.idx+1] + l.values = l.values[:l.idx+1] + } + l.keys[l.idx], l.values[l.idx] = k, v + l.idx++ + return v +} diff --git a/driver/internal/protocol/decode.go b/driver/internal/protocol/decode.go index 6be202e3..c70cc1c6 100644 --- a/driver/internal/protocol/decode.go +++ b/driver/internal/protocol/decode.go @@ -6,7 +6,7 @@ import ( "github.com/SAP/go-hdb/driver/internal/protocol/encoding" ) -func decodeResult(tc typeCode, d *encoding.Decoder, readFn lobReadFn, scale int) (any, error) { //nolint: gocyclo +func decodeResult(tc typeCode, d *encoding.Decoder, readFn lobReadFn, lobChunkSize, scale int) (any, error) { //nolint: gocyclo switch tc { case tcBoolean: return d.BooleanField() @@ -65,14 +65,14 @@ func decodeResult(tc typeCode, d *encoding.Decoder, readFn lobReadFn, scale int) case tcStPoint, tcStGeometry: return d.HexField() case tcBlob, tcClob, tcLocator, tcBintext: - descr := new(lobOutBytesDescr) - if descr.decode(d, readFn) { + descr := newLobOutDescr(nil, readFn, lobChunkSize) + if descr.decode(d) { return nil, nil } return descr, nil case tcText, tcNclob, tcNlocator: - descr := newLobOutCharsDescr(d.Transformer()) - if descr.decode(d, readFn) { + descr := newLobOutDescr(d.Transformer(), readFn, lobChunkSize) + if descr.decode(d) { return nil, nil } return descr, nil diff --git a/driver/internal/protocol/lob.go b/driver/internal/protocol/lob.go index 8f7131ee..a3aabef2 100644 --- a/driver/internal/protocol/lob.go +++ b/driver/internal/protocol/lob.go @@ -6,10 +6,11 @@ import ( "fmt" "io" "slices" + "sync" "unicode/utf8" "github.com/SAP/go-hdb/driver/internal/protocol/encoding" - "github.com/SAP/go-hdb/driver/unicode/cesu8" + "github.com/SAP/go-hdb/driver/internal/unsafe" "golang.org/x/text/transform" ) @@ -75,10 +76,7 @@ type LobScanner interface { Scan(w io.Writer) error } -var ( - _ LobScanner = (*lobOutBytesDescr)(nil) - _ LobScanner = (*lobOutCharsDescr)(nil) -) +var _ LobScanner = (*lobOutDescr)(nil) // LobInDescr represents a lob input descriptor. type LobInDescr struct { @@ -128,12 +126,17 @@ type LocatorID uint64 // byte[locatorIdSize] type lobReadFn func(lobRequest *ReadLobRequest, lobReply *ReadLobReply) error +var lobOutDescrPool = sync.Pool{New: func() any { return new(lobOutDescr) }} + // lobOutDescr represents a lob output descriptor. type lobOutDescr struct { + // if set -> char based + tr transform.Transformer /* readFn is set by decode if additional data packages need to be read (not last data) */ - readFn lobReadFn + readFn lobReadFn + chunkSize int /* HDB does not return lob type code but undefined only --> ltc is always ltcUndefined @@ -145,13 +148,26 @@ type lobOutDescr struct { numByte int64 id LocatorID b []byte + + // scan attributes. 
+ wr io.Writer + lobRequest *ReadLobRequest + lobReply *ReadLobReply +} + +func newLobOutDescr(tr transform.Transformer, readFn lobReadFn, chunkSize int) *lobOutDescr { + descr := lobOutDescrPool.Get().(*lobOutDescr) + descr.tr = tr + descr.readFn = readFn + descr.chunkSize = chunkSize + return descr } func (d *lobOutDescr) String() string { return fmt.Sprintf("typecode %s options %s numChar %d numByte %d id %d bytes %v", d.ltc, d.opt, d.numChar, d.numByte, d.id, d.b) } -func (d *lobOutDescr) decode(dec *encoding.Decoder, readFn lobReadFn) bool { +func (d *lobOutDescr) decode(dec *encoding.Decoder) bool { d.ltc = lobTypecode(dec.Int8()) d.opt = lobOptions(dec.Int8()) if d.opt.isNull() { @@ -162,110 +178,92 @@ func (d *lobOutDescr) decode(dec *encoding.Decoder, readFn lobReadFn) bool { d.numByte = dec.Int64() d.id = LocatorID(dec.Uint64()) size := int(dec.Int32()) - d.b = make([]byte, size) + d.b = slices.Grow(d.b, size)[:size] dec.Bytes(d.b) - // if not last data -> set readFn for scan - if !d.opt.isLastData() { - d.readFn = readFn - } return false } -func (d *lobOutDescr) closePipeWriter(wr io.Writer, err error) { - // if the writer is a pipe-end -> close at the end - if pwr, ok := wr.(*io.PipeWriter); ok { - if err != nil { - pwr.CloseWithError(err) - } else { - pwr.Close() +func (d *lobOutDescr) countChars(b []byte) (numChar int) { + s := unsafe.ByteSlice2String(b) + for _, r := range s { + numChar++ + if utf8.RuneLen(r) == 4 { + numChar++ // caution: hdb counts 2 chars in case of surrogate pair } } + return } -type lobOutBytesDescr struct { - lobOutDescr -} +func (d *lobOutDescr) write(b []byte) (int, error) { + if d.tr == nil { + if _, err := d.wr.Write(b); err != nil { + return len(b), err + } + return len(b), nil + } + d.tr.Reset() + // cesu8 -> utf8 (always enough space) + nDst, _, err := d.tr.Transform(b, b, false) + if err != nil && err != transform.ErrShortSrc { //nolint: errorlint + return nDst, err + } -func (d *lobOutBytesDescr) write(wr io.Writer, b []byte) (int, error) { - if _, err := wr.Write(b); err != nil { - return len(b), err + numChar := d.countChars(b[:nDst]) + if _, err := d.wr.Write(b[:nDst]); err != nil { + return numChar, err } - return len(b), nil + return numChar, nil } -func (d *lobOutBytesDescr) scan(wr io.Writer) error { - if _, err := wr.Write(d.b); err != nil { +func (d *lobOutDescr) scan(wr io.Writer) error { + d.wr = wr + + numChar, err := d.write(d.b) + if err != nil { return err } - if d.readFn == nil { + + if d.opt.isLastData() { return nil } - lobRequest := &ReadLobRequest{ofs: int64(len(d.b)), id: d.id} - lobReply := &ReadLobReply{write: d.write, wr: wr, id: d.id} - return d.readFn(lobRequest, lobReply) + + if d.lobRequest == nil { + d.lobRequest = new(ReadLobRequest) + } + if d.lobReply == nil { + d.lobReply = &ReadLobReply{lobOutDescr: d} + } + d.lobRequest.id = d.id + d.lobRequest.ofs = int64(numChar) + d.lobRequest.chunkSize = d.chunkSize + return d.readFn(d.lobRequest, d.lobReply) } // Scan implements the LobScanner interface. 
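countChars above mirrors HDB's character accounting for LOB offsets: code points beyond U+FFFF are surrogate pairs in CESU-8/UTF-16 and therefore count as two characters. The same rule over a plain UTF-8 string, as a small verifiable sketch:

package main

import (
	"fmt"
	"unicode/utf8"
)

// hdbCharLen counts characters HDB-style: runes that need 4 bytes in UTF-8
// (everything above U+FFFF, i.e. a surrogate pair in UTF-16/CESU-8) count twice.
func hdbCharLen(s string) int {
	n := 0
	for _, r := range s {
		n++
		if utf8.RuneLen(r) == 4 {
			n++
		}
	}
	return n
}

func main() {
	fmt.Println(hdbCharLen("abc"))  // 3
	fmt.Println(hdbCharLen("a😀b")) // 4: the emoji counts twice
}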
-func (d *lobOutBytesDescr) Scan(wr io.Writer) error { +func (d *lobOutDescr) Scan(wr io.Writer) error { err := d.scan(wr) - d.closePipeWriter(wr, err) - return err -} - -type lobOutCharsDescr struct { - tr transform.Transformer - lobOutDescr -} - -func newLobOutCharsDescr(tr transform.Transformer) *lobOutCharsDescr { - return &lobOutCharsDescr{tr: tr} -} - -func (d *lobOutCharsDescr) countFullChars(b []byte) (size int, numChar int) { - for len(b) > 0 { - r, width := cesu8.DecodeRune(b) - if r == utf8.RuneError { - return // stop if not full rune - } - - size += width - if width == cesu8.CESUMax { - numChar += 2 // caution: hdb counts 2 chars in case of surrogate pair + // if the writer is a pipe-end -> close at the end + if pwr, ok := wr.(*io.PipeWriter); ok { + if err != nil { + pwr.CloseWithError(err) } else { - numChar++ + pwr.Close() } - b = b[width:] - } - return -} - -func (d *lobOutCharsDescr) write(wr io.Writer, b []byte) (int, error) { - size, numChar := d.countFullChars(b) - if _, err := wr.Write(b[:size]); err != nil { - return numChar, err } - return numChar, nil + lobOutDescrPool.Put(d) + return err } -func (d *lobOutCharsDescr) scan(wr io.Writer) error { - wr = transform.NewWriter(wr, d.tr) // CESU8 transformer - size, numChar := d.countFullChars(d.b) - if _, err := wr.Write(d.b[:size]); err != nil { - return err +func (d *lobOutDescr) Write() (int, error) { + n, err := d.write(d.b) + if err != nil { + return n, err } - if d.readFn == nil { - return nil + if d.opt.isLastData() { + return n, io.EOF } - lobRequest := &ReadLobRequest{ofs: int64(numChar), id: d.id} - lobReply := &ReadLobReply{write: d.write, wr: wr, id: d.id} - return d.readFn(lobRequest, lobReply) -} - -// Scan implements the LobScanner interface. -func (d *lobOutCharsDescr) Scan(wr io.Writer) error { - err := d.scan(wr) - d.closePipeWriter(wr, err) - return err + d.lobRequest.ofs += int64(n) + return n, nil } /* @@ -395,24 +393,18 @@ type ReadLobRequest struct { */ id LocatorID ofs int64 - chunkSize int32 + chunkSize int } func (r *ReadLobRequest) String() string { return fmt.Sprintf("id %d offset %d size %d", r.id, r.ofs, r.chunkSize) } -// AddOfs adds n to offset. -func (r *ReadLobRequest) AddOfs(n int) { r.ofs += int64(n) } - -// SetChunkSize sets the chunk size. -func (r *ReadLobRequest) SetChunkSize(size int) { r.chunkSize = int32(size) } - // sniffer. func (r *ReadLobRequest) decode(dec *encoding.Decoder) error { r.id = LocatorID(dec.Uint64()) r.ofs = dec.Int64() - r.chunkSize = dec.Int32() + r.chunkSize = int(dec.Int32()) dec.Skip(4) return nil } @@ -420,28 +412,20 @@ func (r *ReadLobRequest) decode(dec *encoding.Decoder) error { func (r *ReadLobRequest) encode(enc *encoding.Encoder) error { enc.Uint64(uint64(r.id)) enc.Int64(r.ofs + 1) // 1-based - enc.Int32(r.chunkSize) + enc.Int32(int32(r.chunkSize)) enc.Zeroes(4) return nil } // ReadLobReply represents a lob read reply part. type ReadLobReply struct { - id LocatorID - opt lobOptions - b []byte - - write func(wr io.Writer, b []byte) (int, error) - wr io.Writer + *lobOutDescr } func (r *ReadLobReply) String() string { return fmt.Sprintf("id %d options %s bytes %v", r.id, r.opt, r.b) } -// IsLastData returns true in case of last data package read, false otherwise. 
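Scan above still closes an io.PipeWriter target explicitly, using CloseWithError so that a goroutine reading the other pipe end is unblocked and sees the scan error rather than a bare EOF. A minimal sketch of that pattern; produce is an invented stand-in for the scanning side:

package main

import (
	"errors"
	"fmt"
	"io"
)

// produce writes into w and, when w is a pipe end, forwards any error to the
// reader side - the same pattern lobOutDescr.Scan uses.
func produce(w io.Writer, fail bool) {
	var err error
	if fail {
		err = errors.New("scan failed")
	} else {
		_, err = w.Write([]byte("lob data"))
	}
	if pw, ok := w.(*io.PipeWriter); ok {
		if err != nil {
			pw.CloseWithError(err) // reader's next Read returns this error
		} else {
			pw.Close() // reader sees io.EOF
		}
	}
}

func main() {
	pr, pw := io.Pipe()
	go produce(pw, true)
	_, err := io.ReadAll(pr)
	fmt.Println(err) // scan failed
}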
-func (r *ReadLobReply) IsLastData() bool { return r.opt.isLastData() } - func (r *ReadLobReply) decodeNumArg(dec *encoding.Decoder, numArg int) error { if numArg != 1 { panic("numArg == 1 expected") @@ -457,5 +441,3 @@ func (r *ReadLobReply) decodeNumArg(dec *encoding.Decoder, numArg int) error { dec.Bytes(r.b) return nil } - -func (r *ReadLobReply) Write() (int, error) { return r.write(r.wr, r.b) } diff --git a/driver/internal/protocol/parameter.go b/driver/internal/protocol/parameter.go index 15072a69..2a5a8ff3 100644 --- a/driver/internal/protocol/parameter.go +++ b/driver/internal/protocol/parameter.go @@ -293,8 +293,8 @@ func (f *ParameterField) encodePrm(enc *encoding.Encoder, v any) error { } } -func (f *ParameterField) decodeResult(dec *encoding.Decoder, readFn lobReadFn) (any, error) { - return decodeResult(f.tc, dec, readFn, f.scale) +func (f *ParameterField) decodeResult(dec *encoding.Decoder, readFn lobReadFn, lobChunkSize int) (any, error) { + return decodeResult(f.tc, dec, readFn, lobChunkSize, f.scale) } /* @@ -439,14 +439,14 @@ func (p *OutputParameters) String() string { return fmt.Sprintf("fields %v values %v", p.OutputFields, p.FieldValues) } -func (p *OutputParameters) decodeResult(dec *encoding.Decoder, numArg int, readFn lobReadFn) error { +func (p *OutputParameters) decodeResult(dec *encoding.Decoder, numArg int, readFn lobReadFn, lobChunkSize int) error { cols := len(p.OutputFields) p.FieldValues = resizeSlice(p.FieldValues, numArg*cols) for i := 0; i < numArg; i++ { for j, f := range p.OutputFields { var err error - if p.FieldValues[i*cols+j], err = f.decodeResult(dec, readFn); err != nil { + if p.FieldValues[i*cols+j], err = f.decodeResult(dec, readFn, lobChunkSize); err != nil { p.DecodeErrors = append(p.DecodeErrors, &DecodeError{row: i, fieldName: f.Name(), err: err}) // collect decode / conversion errors } } diff --git a/driver/internal/protocol/partkind.go b/driver/internal/protocol/partkind.go index 673f8c7a..194b811c 100644 --- a/driver/internal/protocol/partkind.go +++ b/driver/internal/protocol/partkind.go @@ -8,10 +8,10 @@ const ( pkNil PartKind = 0 PkCommand PartKind = 3 PkResultset PartKind = 5 - PkError PartKind = 6 + pkError PartKind = 6 PkStatementID PartKind = 10 pkTransactionID PartKind = 11 - PkRowsAffected PartKind = 12 + pkRowsAffected PartKind = 12 PkResultsetID PartKind = 13 PkTopologyInformation PartKind = 15 pkTableLocation PartKind = 16 diff --git a/driver/internal/protocol/parts.go b/driver/internal/protocol/parts.go index 98479e2a..64c19ce9 100644 --- a/driver/internal/protocol/parts.go +++ b/driver/internal/protocol/parts.go @@ -28,7 +28,7 @@ type bufLenPartDecoder interface { } type resultPartDecoder interface { Part - decodeResult(dec *encoding.Decoder, numArg int, readFn lobReadFn) error + decodeResult(dec *encoding.Decoder, numArg int, readFn lobReadFn, lobChunkSize int) error } // partEncoder represents a protocol part the driver is able to encode. 
@@ -39,7 +39,7 @@ type partEncoder interface { encode(enc *encoding.Encoder) error } -func (*HdbErrors) kind() PartKind { return PkError } +func (*HdbErrors) kind() PartKind { return pkError } func (*AuthInitRequest) kind() PartKind { return PkAuthentication } func (*AuthInitReply) kind() PartKind { return PkAuthentication } func (*AuthFinalRequest) kind() PartKind { return PkAuthentication } @@ -48,7 +48,7 @@ func (ClientID) kind() PartKind { return PkClientID } func (clientInfo) kind() PartKind { return PkClientInfo } func (*TopologyInformation) kind() PartKind { return PkTopologyInformation } func (Command) kind() PartKind { return PkCommand } -func (*RowsAffected) kind() PartKind { return PkRowsAffected } +func (*rowsAffected) kind() PartKind { return pkRowsAffected } func (StatementID) kind() PartKind { return PkStatementID } func (*ParameterMetadata) kind() PartKind { return PkParameterMetadata } func (*InputParameters) kind() PartKind { return PkParameters } @@ -121,7 +121,7 @@ var ( _ numArgPartDecoder = (*clientInfo)(nil) _ numArgPartDecoder = (*TopologyInformation)(nil) _ bufLenPartDecoder = (*Command)(nil) - _ numArgPartDecoder = (*RowsAffected)(nil) + _ numArgPartDecoder = (*rowsAffected)(nil) _ partDecoder = (*StatementID)(nil) _ numArgPartDecoder = (*ParameterMetadata)(nil) _ numArgPartDecoder = (*InputParameters)(nil) @@ -142,12 +142,12 @@ var ( ) var genPartTypeMap = map[PartKind]reflect.Type{ - PkError: hdbreflect.TypeFor[HdbErrors](), + pkError: hdbreflect.TypeFor[HdbErrors](), PkClientID: hdbreflect.TypeFor[ClientID](), PkClientInfo: hdbreflect.TypeFor[clientInfo](), PkTopologyInformation: hdbreflect.TypeFor[TopologyInformation](), PkCommand: hdbreflect.TypeFor[Command](), - PkRowsAffected: hdbreflect.TypeFor[RowsAffected](), + pkRowsAffected: hdbreflect.TypeFor[rowsAffected](), PkStatementID: hdbreflect.TypeFor[StatementID](), PkResultsetID: hdbreflect.TypeFor[ResultsetID](), PkFetchSize: hdbreflect.TypeFor[Fetchsize](), diff --git a/driver/internal/protocol/protocol.go b/driver/internal/protocol/protocol.go index a2ac65c7..b78950ae 100644 --- a/driver/internal/protocol/protocol.go +++ b/driver/internal/protocol/protocol.go @@ -53,10 +53,11 @@ func (c *partCache) get(kind PartKind) (Part, bool) { // Reader represents a protocol reader. type Reader struct { - dec *encoding.Decoder - readFn lobReadFn - protTrace bool - logger *slog.Logger + dec *encoding.Decoder + readFn lobReadFn + protTrace bool + logger *slog.Logger + lobChunkSize int prefix string // ReadProlog reads the protocol prolog. @@ -67,39 +68,48 @@ type Reader struct { ph *partHeader partCache partCache + + hdbErrors *HdbErrors + rowsAffected *rowsAffected } -func newReader(dec *encoding.Decoder, readFn lobReadFn, protTrace bool, logger *slog.Logger) *Reader { +func newReader(dec *encoding.Decoder, readFn lobReadFn, protTrace bool, logger *slog.Logger, lobChunkSize int) *Reader { return &Reader{ - dec: dec, - readFn: readFn, - protTrace: protTrace, - logger: logger, - partCache: partCache{}, - mh: &messageHeader{}, - sh: &segmentHeader{}, - ph: &partHeader{}, + dec: dec, + readFn: readFn, + protTrace: protTrace, + logger: logger, + lobChunkSize: lobChunkSize, + partCache: partCache{}, + mh: &messageHeader{}, + sh: &segmentHeader{}, + ph: &partHeader{}, + hdbErrors: &HdbErrors{}, + rowsAffected: &rowsAffected{}, } } // NewDBReader returns an instance of a database protocol reader. 
-func NewDBReader(dec *encoding.Decoder, readFn lobReadFn, protTrace bool, logger *slog.Logger) *Reader { - reader := newReader(dec, readFn, protTrace, logger) +func NewDBReader(dec *encoding.Decoder, readFn lobReadFn, protTrace bool, logger *slog.Logger, lobChunkSize int) *Reader { + reader := newReader(dec, readFn, protTrace, logger, lobChunkSize) reader.ReadProlog = reader.readPrologDB reader.prefix = prefixDB return reader } // NewClientReader returns an instance of a client protocol reader. -func NewClientReader(dec *encoding.Decoder, readFn lobReadFn, protTrace bool, logger *slog.Logger) *Reader { - reader := newReader(dec, readFn, protTrace, logger) +func NewClientReader(dec *encoding.Decoder, readFn lobReadFn, protTrace bool, logger *slog.Logger, chunkSize int) *Reader { + reader := newReader(dec, readFn, protTrace, logger, chunkSize) reader.ReadProlog = reader.readPrologClient reader.prefix = prefixClient return reader } // SkipParts reads and discards all protocol parts. -func (r *Reader) SkipParts(ctx context.Context) error { return r.IterateParts(ctx, nil) } +func (r *Reader) SkipParts(ctx context.Context) error { + _, err := r.IterateParts(ctx, 0, nil) + return err +} // SessionID returns the session ID. func (r *Reader) SessionID() int64 { return r.mh.sessionID } @@ -146,10 +156,9 @@ func (r *Reader) skipPaddingLastPart(numReadByte int64) { } } -func (r *Reader) readPart(ctx context.Context, part Part) error { +func (r *Reader) readPart(ctx context.Context, part Part) (err error) { cntBefore := r.dec.Cnt() - var err error switch part := part.(type) { // do not return here in case of error -> read stream would be broken case partDecoder: @@ -159,7 +168,7 @@ func (r *Reader) readPart(ctx context.Context, part Part) error { case numArgPartDecoder: err = part.decodeNumArg(r.dec, r.ph.numArg()) case resultPartDecoder: - err = part.decodeResult(r.dec, r.ph.numArg(), r.readFn) + err = part.decodeResult(r.dec, r.ph.numArg(), r.readFn, r.lobChunkSize) default: panic(fmt.Errorf("invalid part decoder %[1]T %[1]v", part)) } @@ -182,12 +191,12 @@ func (r *Reader) readPart(ctx context.Context, part Part) error { } // IterateParts iterates through all protocol parts. 
-func (r *Reader) IterateParts(ctx context.Context, fn func(kind PartKind, attrs PartAttributes, read func(part Part))) error { - var lastErrors *HdbErrors - var lastRowsAffected *RowsAffected +func (r *Reader) IterateParts(ctx context.Context, offset int, fn func(kind PartKind, attrs PartAttributes, readFn func(part Part))) (int64, error) { + var hdbErrors *HdbErrors + var rowsAffected *rowsAffected if err := r.mh.decode(r.dec); err != nil { - return err + return 0, err } var numReadByte int64 = 0 // header bytes are not calculated in header varPartBytes: start with zero @@ -197,7 +206,7 @@ func (r *Reader) IterateParts(ctx context.Context, fn func(kind PartKind, attrs for i := 0; i < int(r.mh.noOfSegm); i++ { if err := r.sh.decode(r.dec); err != nil { - return err + return 0, err } numReadByte += segmentHeaderSize @@ -209,7 +218,7 @@ func (r *Reader) IterateParts(ctx context.Context, fn func(kind PartKind, attrs lastPart := int(r.sh.noOfParts) - 1 for j := 0; j <= lastPart; j++ { if err := r.ph.decode(r.dec); err != nil { - return err + return 0, err } kind := r.ph.partKind @@ -221,40 +230,43 @@ func (r *Reader) IterateParts(ctx context.Context, fn func(kind PartKind, attrs cntBefore := r.dec.Cnt() - partRequested := false - if kind != PkError && fn != nil { // caller must not handle hdb errors - var err error - fn(kind, r.ph.partAttributes, func(part Part) { - partRequested = true - err = r.readPart(ctx, part) - if part.kind() == PkRowsAffected { - lastRowsAffected = part.(*RowsAffected) + switch kind { + case pkRowsAffected: + if err := r.readPart(ctx, r.rowsAffected); err != nil { + return 0, err + } + rowsAffected = r.rowsAffected + case pkError: + if err := r.readPart(ctx, r.hdbErrors); err != nil { + return 0, err + } + hdbErrors = r.hdbErrors + default: + read := false + // caller must not handle hdb errors and rows affected. 
+ if fn != nil { + var err error + fn(kind, r.ph.partAttributes, func(part Part) { + read = true + err = r.readPart(ctx, part) + }) + if err != nil { + return 0, err } - }) - if err != nil { - return err } - } - if !partRequested { - // if trace is on or mandatory parts need to be read we cannot skip - if !(r.protTrace || kind == PkError || kind == PkRowsAffected) { - r.dec.Skip(int(r.ph.bufferLength)) - } else { - if part, ok := r.partCache.get(kind); ok { - if err := r.readPart(ctx, part); err != nil { - return err - } - switch kind { - case PkError: - lastErrors = part.(*HdbErrors) - case PkRowsAffected: - lastRowsAffected = part.(*RowsAffected) + if !read { + // if trace is on or mandatory parts need to be read we cannot skip + if r.protTrace { + if part, ok := r.partCache.get(kind); ok { + if err := r.readPart(ctx, part); err != nil { + return 0, err + } + } else { + r.dec.Skip(int(r.ph.bufferLength)) + r.logger.LogAttrs(ctx, slog.LevelInfo, traceMsg, slog.String(r.prefix+textSkip, kind.String())) } } else { r.dec.Skip(int(r.ph.bufferLength)) - if r.protTrace { - r.logger.LogAttrs(ctx, slog.LevelInfo, traceMsg, slog.String(r.prefix+textSkip, kind.String())) - } } } } @@ -272,29 +284,34 @@ func (r *Reader) IterateParts(ctx context.Context, fn func(kind PartKind, attrs if err := r.dec.Error(); err != nil { r.dec.ResetError() - return err + return 0, err + } + + var numRow int64 + if rowsAffected != nil { + numRow = rowsAffected.Total() } - if lastErrors == nil { - return nil + if hdbErrors == nil { + return numRow, nil } - if lastRowsAffected != nil { // link statement to error + if rowsAffected != nil { // link statement to error j := 0 - for i, rows := range lastRowsAffected.rows { - if rows == RaExecutionFailed { - lastErrors.setStmtNo(j, lastRowsAffected.Ofs+i) + for i, rows := range rowsAffected.rows { + if rows == raExecutionFailed { + hdbErrors.setStmtNo(j, offset+i) j++ } } } - if lastErrors.onlyWarnings { - for _, err := range lastErrors.errs { + if hdbErrors.onlyWarnings { + for _, err := range hdbErrors.errs { r.logger.LogAttrs(ctx, slog.LevelWarn, err.Error()) } - return nil + return numRow, nil } - return lastErrors + return numRow, hdbErrors } // Writer represents a protocol writer. diff --git a/driver/internal/protocol/result.go b/driver/internal/protocol/result.go index 670a946d..adae696e 100644 --- a/driver/internal/protocol/result.go +++ b/driver/internal/protocol/result.go @@ -125,8 +125,8 @@ func (f *ResultField) decode(dec *encoding.Decoder) { f.names.insert(f.columnDisplayNameOfs) } -func (f *ResultField) decodeResult(dec *encoding.Decoder, readFn lobReadFn) (any, error) { - return decodeResult(f.tc, dec, readFn, f.scale) +func (f *ResultField) decodeResult(dec *encoding.Decoder, readFn lobReadFn, lobChunkSize int) (any, error) { + return decodeResult(f.tc, dec, readFn, lobChunkSize, f.scale) } // ResultMetadata represents the metadata of a set of database result fields. 
@@ -163,14 +163,14 @@ func (r *Resultset) String() string { return fmt.Sprintf("result fields %v field values %v", r.ResultFields, r.FieldValues) } -func (r *Resultset) decodeResult(dec *encoding.Decoder, numArg int, readFn lobReadFn) error { +func (r *Resultset) decodeResult(dec *encoding.Decoder, numArg int, readFn lobReadFn, lobChunkSize int) error { cols := len(r.ResultFields) r.FieldValues = resizeSlice(r.FieldValues, numArg*cols) for i := 0; i < numArg; i++ { for j, f := range r.ResultFields { var err error - if r.FieldValues[i*cols+j], err = f.decodeResult(dec, readFn); err != nil { + if r.FieldValues[i*cols+j], err = f.decodeResult(dec, readFn, lobChunkSize); err != nil { r.DecodeErrors = append(r.DecodeErrors, &DecodeError{row: i, fieldName: f.Name(), err: err}) // collect decode / conversion errors } } diff --git a/driver/internal/protocol/rowsaffected.go b/driver/internal/protocol/rowsaffected.go index 6975e642..e178f012 100644 --- a/driver/internal/protocol/rowsaffected.go +++ b/driver/internal/protocol/rowsaffected.go @@ -9,20 +9,19 @@ import ( // rows affected. const ( raSuccessNoInfo = -2 - RaExecutionFailed = -3 + raExecutionFailed = -3 ) -// RowsAffected represents a rows affected part. -type RowsAffected struct { - Ofs int +// rowsAffected represents a rows affected part. +type rowsAffected struct { rows []int32 } -func (r RowsAffected) String() string { +func (r rowsAffected) String() string { return fmt.Sprintf("%v", r.rows) } -func (r *RowsAffected) decodeNumArg(dec *encoding.Decoder, numArg int) error { +func (r *rowsAffected) decodeNumArg(dec *encoding.Decoder, numArg int) error { r.rows = resizeSlice(r.rows, numArg) for i := 0; i < numArg; i++ { @@ -32,7 +31,7 @@ func (r *RowsAffected) decodeNumArg(dec *encoding.Decoder, numArg int) error { } // Total return the total number of all affected rows. 
-func (r RowsAffected) Total() int64 { +func (r rowsAffected) Total() int64 { total := int64(0) for _, rows := range r.rows { if rows > 0 { diff --git a/driver/internal/protocol/x_stringer.go b/driver/internal/protocol/x_stringer.go index 3a358434..3b3a64bd 100644 --- a/driver/internal/protocol/x_stringer.go +++ b/driver/internal/protocol/x_stringer.go @@ -394,10 +394,10 @@ func _() { _ = x[pkNil-0] _ = x[PkCommand-3] _ = x[PkResultset-5] - _ = x[PkError-6] + _ = x[pkError-6] _ = x[PkStatementID-10] _ = x[pkTransactionID-11] - _ = x[PkRowsAffected-12] + _ = x[pkRowsAffected-12] _ = x[PkResultsetID-13] _ = x[PkTopologyInformation-15] _ = x[pkTableLocation-16] @@ -448,7 +448,7 @@ func _() { _ = x[pkSQLReplyOptions-73] } -const _PartKind_name = "pkNilPkCommandPkResultsetPkErrorPkStatementIDpkTransactionIDPkRowsAffectedPkResultsetIDPkTopologyInformationpkTableLocationPkReadLobRequestPkReadLobReplypkAbapIStreampkAbapOStreampkCommandInfoPkWriteLobRequestPkClientContextPkWriteLobReplyPkParametersPkAuthenticationpkSessionContextPkClientIDpkProfilePkStatementContextpkPartitionInformationPkOutputParametersPkConnectOptionspkCommitOptionspkFetchOptionsPkFetchSizePkParameterMetadataPkResultMetadatapkFindLobRequestpkFindLobReplypkItabSHMpkItabChunkMetadatapkItabMetadatapkItabResultChunkPkClientInfopkStreamDatapkOStreamResultpkFDARequestMetadatapkFDAReplyMetadatapkBatchPreparepkBatchExecutePkTransactionFlagspkRowSlotImageParamMetadatapkRowSlotImageResultsetPkDBConnectInfopkLobFlagspkResultsetOptionspkXATransactionInfopkSessionVariablepkWorkLoadReplayContextpkSQLReplyOptions" +const _PartKind_name = "pkNilPkCommandPkResultsetpkErrorPkStatementIDpkTransactionIDpkRowsAffectedPkResultsetIDPkTopologyInformationpkTableLocationPkReadLobRequestPkReadLobReplypkAbapIStreampkAbapOStreampkCommandInfoPkWriteLobRequestPkClientContextPkWriteLobReplyPkParametersPkAuthenticationpkSessionContextPkClientIDpkProfilePkStatementContextpkPartitionInformationPkOutputParametersPkConnectOptionspkCommitOptionspkFetchOptionsPkFetchSizePkParameterMetadataPkResultMetadatapkFindLobRequestpkFindLobReplypkItabSHMpkItabChunkMetadatapkItabMetadatapkItabResultChunkPkClientInfopkStreamDatapkOStreamResultpkFDARequestMetadatapkFDAReplyMetadatapkBatchPreparepkBatchExecutePkTransactionFlagspkRowSlotImageParamMetadatapkRowSlotImageResultsetPkDBConnectInfopkLobFlagspkResultsetOptionspkXATransactionInfopkSessionVariablepkWorkLoadReplayContextpkSQLReplyOptions" var _PartKind_map = map[PartKind]string{ 0: _PartKind_name[0:5], diff --git a/driver/lob.go b/driver/lob.go index eecc3389..7e0269af 100644 --- a/driver/lob.go +++ b/driver/lob.go @@ -6,9 +6,9 @@ import ( "errors" "fmt" "io" - "sync" p "github.com/SAP/go-hdb/driver/internal/protocol" + "github.com/SAP/go-hdb/driver/internal/unsafe" ) func scanLob(src any, wr io.Writer) error { @@ -26,10 +26,6 @@ func scanLob(src any, wr io.Writer) error { return nil } -var bufferPool = sync.Pool{ - New: func() any { return new(bytes.Buffer) }, -} - // ScanLobBytes supports scanning Lob data into a byte slice. // This enables using []byte based custom types for scanning Lobs instead of using a Lob object. // For usage please refer to the example. @@ -37,14 +33,11 @@ func ScanLobBytes(src any, b *[]byte) error { if b == nil { return fmt.Errorf("lob scan error: parameter b %T is nil", b) } - wr := bufferPool.Get().(*bytes.Buffer) - wr.Reset() + wr := new(bytes.Buffer) // cannot pool as we use the underlaying buffer (*). 
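The "(*)" comments above are why the old bufferPool had to go: Bytes() and the unsafe string conversion hand the caller a view into the buffer's underlying array, so returning the buffer to a sync.Pool would let the next scan overwrite data a previous caller still holds. A minimal sketch of that hazard using the removed pooled-buffer pattern; badScan is deliberately wrong:

package main

import (
	"bytes"
	"fmt"
	"sync"
)

var pool = sync.Pool{New: func() any { return new(bytes.Buffer) }}

// badScan returns buf.Bytes() and puts the buffer back: the caller's slice
// now aliases memory the pool may hand to the next user.
func badScan(src string) []byte {
	buf := pool.Get().(*bytes.Buffer)
	buf.Reset()
	buf.WriteString(src)
	b := buf.Bytes()
	pool.Put(buf) // hazard: b still points into buf's array
	return b
}

func main() {
	first := badScan("first value")
	_ = badScan("overwritten!") // the pool will typically hand back the same buffer
	fmt.Println(string(first))  // may print "overwritten" instead of "first value"
}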
     if err := scanLob(src, wr); err != nil {
-        bufferPool.Put(wr)
         return err
     }
-    *b = wr.Bytes()
-    bufferPool.Put(wr)
+    *b = wr.Bytes() // (*) use underlying buffer.
     return nil
 }
 
@@ -55,14 +48,11 @@ func ScanLobString(src any, s *string) error {
     if s == nil {
         return fmt.Errorf("lob scan error: parameter s %T is nil", s)
     }
-    wr := bufferPool.Get().(*bytes.Buffer)
-    wr.Reset()
+    wr := new(bytes.Buffer) // cannot pool as we use the underlying buffer (*).
     if err := scanLob(src, wr); err != nil {
-        bufferPool.Put(wr)
         return err
     }
-    *s = wr.String()
-    bufferPool.Put(wr)
+    *s = unsafe.ByteSlice2String(wr.Bytes()) // (*) use underlying buffer.
     return nil
 }
diff --git a/driver/main_test.go b/driver/main_test.go
index 2766e9e5..ce9a47ed 100644
--- a/driver/main_test.go
+++ b/driver/main_test.go
@@ -15,6 +15,8 @@ import (
     "testing"
     "text/template"
     "time"
+
+    "go.uber.org/goleak"
 )
 
 //go:embed stats.tmpl
@@ -107,6 +109,8 @@ func (mt *MainTest) run(m *testing.M, schema string, dk dropKind) (int, error) {
 
     db.Close() // close before printing stats
 
+    stdHdbDriver.metrics.close() // wait for all pending metrics
+
     t := template.Must(template.New("stats").Parse(statsTemplate))
     b := new(bytes.Buffer)
     if err := t.Execute(b, stdHdbDriver.Stats()); err != nil {
@@ -219,6 +223,10 @@ func (mt *MainTest) querySchemasPrefix(db *sql.DB, prefix string) ([]string, err
     return names, nil
 }
 
+const (
+    cpuProfileName = "test.cpuprofile"
+)
+
 func TestMain(m *testing.M) {
     log.SetFlags(log.Ldate | log.Ltime | log.Lshortfile)
 
@@ -235,21 +243,31 @@ func TestMain(m *testing.M) {
         dk = dropKind(i)
         return nil
     })
+    leak := flag.Bool("leak", false, "enable goleak test")
 
     if !flag.Parsed() {
         flag.Parse()
     }
 
+    flag.Visit(func(f *flag.Flag) {
+        if f.Name == cpuProfileName {
+            cpuProfile = true
+        }
+    })
+
     exitCode, err := MT.run(m, *schema, dk)
     if err != nil {
         log.Fatal(err)
     }
 
-    /* goleak (https://github.com/uber-go/goleak) test
-    if err := goleak.Find(); err != nil {
-        log.Print(err)
+    if *leak {
+        // cleanup go-hdb driver.
+        Unregister() //nolint: errcheck
+        // goleak (https://github.com/uber-go/goleak) test
+        if err := goleak.Find(); err != nil {
+            log.Print(err)
+        }
     }
-    */
 
     os.Exit(exitCode)
 }
diff --git a/driver/metrics.go b/driver/metrics.go
index b0b8cd6e..c0073656 100644
--- a/driver/metrics.go
+++ b/driver/metrics.go
@@ -100,10 +100,11 @@ type sqlTimeMsg struct {
 
 const numMetricCollectorCh = 100
 
 type metrics struct {
-    mu    sync.RWMutex
-    once  sync.Once // lazy init
-    wg    *sync.WaitGroup
-    msgCh chan any
+    mu     sync.RWMutex
+    once   sync.Once // lazy init
+    wg     *sync.WaitGroup
+    msgCh  chan any
+    closed bool
 
     parentMetrics *metrics
@@ -161,6 +162,14 @@ func (m *metrics) lazyInit() {
 }
 
 func (m *metrics) close() {
+    m.mu.Lock()
+    if m.closed { // make close idempotent
+        m.mu.Unlock()
+        return
+    }
+    m.closed = true
+    m.mu.Unlock()
+
     close(m.msgCh)
     m.wg.Wait()
 }
diff --git a/driver/sniffer.go b/driver/sniffer.go
index 10466395..256943d3 100644
--- a/driver/sniffer.go
+++ b/driver/sniffer.go
@@ -46,7 +46,8 @@ func pipeData(wg *sync.WaitGroup, conn net.Conn, dbConn net.Conn, wr io.Writer)
 
 func readMsg(ctx context.Context, prd *p.Reader) error {
     // TODO complete for non generic parts, see internal/protocol/parts/newGenPartReader for details
-    return prd.IterateParts(ctx, func(kind p.PartKind, attrs p.PartAttributes, read func(part p.Part)) {})
+    _, err := prd.IterateParts(ctx, 0, func(kind p.PartKind, attrs p.PartAttributes, read func(part p.Part)) {})
+    return err
 }
 
 func logData(ctx context.Context, wg *sync.WaitGroup, prd *p.Reader) {
@@ -78,8 +79,8 @@ func (s *Sniffer) Run() error {
     dbDec := encoding.NewDecoder(dbRd, cesu8.DefaultDecoder)
 
     // TODO: replace nil by lob reader
-    pClientRd := p.NewClientReader(clientDec, nil, true, s.logger)
-    pDBRd := p.NewDBReader(dbDec, nil, true, s.logger)
+    pClientRd := p.NewClientReader(clientDec, nil, true, s.logger, defaultLobChunkSize)
+    pDBRd := p.NewDBReader(dbDec, nil, true, s.logger, defaultLobChunkSize)
 
     go logData(ctx, wg, pClientRd)
     go logData(ctx, wg, pDBRd)
diff --git a/driver/stmt.go b/driver/stmt.go
index d43acdcb..45762447 100644
--- a/driver/stmt.go
+++ b/driver/stmt.go
@@ -171,7 +171,7 @@ func (s *stmt) execCall(ctx context.Context, pr *prepareResult, nvargs []driver.
        - chunkReaders
        - cr (callResult output parameters are set after all lob input parameters are written)
     */
-    if err := c.writeLobs(cr, ids, callArgs.inFields, callArgs.inArgs); err != nil {
+    if err := c.writeLobs(ctx, cr, ids, callArgs.inFields, callArgs.inArgs); err != nil {
         return nil, nil, err
     }
 }
diff --git a/driver/unicode/cesu8/cesu8.go b/driver/unicode/cesu8/cesu8.go
index 0186156f..2dc3824f 100644
--- a/driver/unicode/cesu8/cesu8.go
+++ b/driver/unicode/cesu8/cesu8.go
@@ -95,8 +95,9 @@ func RuneLen(r rune) int {
         return 3
     case r <= utf8.MaxRune:
         return CESUMax
+    default:
+        return -1
     }
-    return -1
 }
 
 const (
diff --git a/go.mod b/go.mod
index 1b6c07f6..df8d744b 100644
--- a/go.mod
+++ b/go.mod
@@ -5,7 +5,8 @@ go 1.21.0
 toolchain go1.22.2
 
 require (
-    github.com/prometheus/client_golang v1.19.0
+    github.com/prometheus/client_golang v1.19.1
+    go.uber.org/goleak v1.3.0
     golang.org/x/crypto v0.23.0
     golang.org/x/text v0.15.0
 )
diff --git a/go.sum b/go.sum
index 0dae59f9..a1bb8526 100644
--- a/go.sum
+++ b/go.sum
@@ -6,14 +6,20 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
 github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU=
-github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
+github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
 github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
 github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
 github.com/prometheus/common v0.53.0 h1:U2pL9w9nmJwJDa4qqLQ3ZaePJ6ZTwt7cMD3AG3+aLCE=
 github.com/prometheus/common v0.53.0/go.mod h1:BrxBKv3FWBIGXw89Mg1AeBq7FSyRzXWI3l3e7W3RN5U=
 github.com/prometheus/procfs v0.14.0 h1:Lw4VdGGoKEZilJsayHf0B+9YgLGREba2C6xr+Fdfq6s=
 github.com/prometheus/procfs v0.14.0/go.mod h1:XL+Iwz8k8ZabyZfMFHPiilCniixqQarAy5Mu67pHlNQ=
+github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
+go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
 golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI=
 golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
 golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
@@ -22,3 +28,5 @@ golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk=
 golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
 google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg=
 google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=