From 3c2b19a728f4bce538ea557986d0842c9fca3462 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 23 Apr 2024 13:37:46 +0200 Subject: [PATCH 01/52] tmp --- stores/metadata.go | 21 +++++++++++++++++++++ stores/migrations/mysql/main/schema.sql | 12 ++++++++++++ stores/migrations/sqlite/main/schema.sql | 7 ++++++- 3 files changed, 39 insertions(+), 1 deletion(-) diff --git a/stores/metadata.go b/stores/metadata.go index 4d8b6e097..8849e5927 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -115,9 +115,17 @@ type ( Contracts []dbContract `gorm:"many2many:contract_set_contracts;constraint:OnDelete:CASCADE"` } + dbDirectory struct { + Model + + ParentID uint + } + dbObject struct { Model + DBDirectoryID uint + DBBucketID uint `gorm:"index;uniqueIndex:idx_object_bucket;NOT NULL"` DBBucket dbBucket ObjectID string `gorm:"index;uniqueIndex:idx_object_bucket"` @@ -1728,6 +1736,19 @@ func (s *SQLStore) DeleteHostSector(ctx context.Context, hk types.PublicKey, roo return deletedSectors, err } +func (s *SQLStore) makeDirsForPath(ctx context.Context, tx *gorm.DB, path string) error { + // Create all directories. + for i := 1; i < len(path); i++ { + if path[i] == '/' { + dir := path[:i+1] + if err := s.db.Create(dbDirectory{}).Error; err != nil { + return fmt.Errorf("failed to create directory %v: %w", dir, err) + } + } + } + return nil +} + func (s *SQLStore) UpdateObject(ctx context.Context, bucket, path, contractSet, eTag, mimeType string, metadata api.ObjectUserMetadata, o object.Object) error { // Sanity check input. 
for _, s := range o.Slabs { diff --git a/stores/migrations/mysql/main/schema.sql b/stores/migrations/mysql/main/schema.sql index 446b2a805..75f5d72cf 100644 --- a/stores/migrations/mysql/main/schema.sql +++ b/stores/migrations/mysql/main/schema.sql @@ -313,11 +313,22 @@ CREATE TABLE `multipart_parts` ( CONSTRAINT `fk_multipart_uploads_parts` FOREIGN KEY (`db_multipart_upload_id`) REFERENCES `multipart_uploads` (`id`) ON DELETE CASCADE ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; +-- dbDirectory +CREATE TABLE `directories` ( + `id` bigint unsigned NOT NULL AUTO_INCREMENT, + `parent_id` bigint unsigned NOT NULL, + `name` varchar(766) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL, + KEY `idx_directories_parent_id` (`parent_id`), + UNIQUE KEY `idx_directories_name` (`name`), + CONSTRAINT `fk_directories_db_directories` FOREIGN KEY (`parent_id`) REFERENCES `directories` (`id`) ON DELETE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; + -- dbObject CREATE TABLE `objects` ( `id` bigint unsigned NOT NULL AUTO_INCREMENT, `created_at` datetime(3) DEFAULT NULL, `db_bucket_id` bigint unsigned NOT NULL, + `db_directory_id` bigint unsigned NOT NULL, `object_id` varchar(766) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL, `key` longblob, `health` double NOT NULL DEFAULT '1', @@ -333,6 +344,7 @@ CREATE TABLE `objects` ( KEY `idx_objects_size` (`size`), KEY `idx_objects_created_at` (`created_at`), CONSTRAINT `fk_objects_db_bucket` FOREIGN KEY (`db_bucket_id`) REFERENCES `buckets` (`id`) + CONSTRAINT `fk_objects_db_directories` FOREIGN KEY (`db_directory_id`) REFERENCES `directories` (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; -- dbSetting diff --git a/stores/migrations/sqlite/main/schema.sql b/stores/migrations/sqlite/main/schema.sql index 3fca53a3a..31ccd4c9d 100644 --- a/stores/migrations/sqlite/main/schema.sql +++ b/stores/migrations/sqlite/main/schema.sql @@ -44,8 +44,13 @@ 
CREATE INDEX `idx_contract_set_contracts_db_contract_id` ON `contract_set_contra CREATE TABLE `buckets` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`policy` text,`name` text NOT NULL UNIQUE); CREATE INDEX `idx_buckets_name` ON `buckets`(`name`); +-- dbDirectory +CREATE TABLE `objects` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`parent_id` integer NOT NULL,`name` text, CONSTRAINT `fk_directories_db_directories` FOREIGN KEY (`parent_id`) REFERENCES `directories`(`id`)); +CREATE INDEX `idx_directories_parent_id` ON `directories`(`parent_id`); +CREATE UNIQUE INDEX `idx_directories_name` ON `directories`(`name`); + -- dbObject -CREATE TABLE `objects` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`db_bucket_id` integer NOT NULL,`object_id` text,`key` blob,`health` real NOT NULL DEFAULT 1,`size` integer,`mime_type` text,`etag` text,CONSTRAINT `fk_objects_db_bucket` FOREIGN KEY (`db_bucket_id`) REFERENCES `buckets`(`id`)); +CREATE TABLE `objects` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`db_bucket_id` integer NOT NULL, `db_directory_id` integer NOT NULL, `object_id` text,`key` blob,`health` real NOT NULL DEFAULT 1,`size` integer,`mime_type` text,`etag` text,CONSTRAINT `fk_objects_db_bucket` FOREIGN KEY (`db_bucket_id`) REFERENCES `buckets`(`id`),CONSTRAINT `fk_objects_db_directories` FOREIGN KEY (`db_directory_id`) REFERENCES `directories`(`id`); CREATE INDEX `idx_objects_db_bucket_id` ON `objects`(`db_bucket_id`); CREATE INDEX `idx_objects_etag` ON `objects`(`etag`); CREATE INDEX `idx_objects_health` ON `objects`(`health`); From 1802ac601795a1641f09e076d73523a38524833b Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 24 Apr 2024 15:08:40 +0200 Subject: [PATCH 02/52] stores: start modifying ObjectEntries --- stores/metadata.go | 248 +++++++++++++++-------- stores/metadata_test.go | 52 +++-- stores/migrations/mysql/main/schema.sql | 2 +- stores/migrations/sqlite/main/schema.sql | 6 +- 4 files 
changed, 201 insertions(+), 107 deletions(-) diff --git a/stores/metadata.go b/stores/metadata.go index 8849e5927..418411fe2 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -118,6 +118,7 @@ type ( dbDirectory struct { Model + Name string ParentID uint } @@ -249,12 +250,12 @@ type ( // rawObjectMetadata is used for hydrating object metadata. rawObjectMetadata struct { - ETag string - Health float64 - MimeType string - ModTime datetime - Name string - Size int64 + ETag string + Health float64 + MimeType string + ModTime datetime + ObjectName string + Size int64 } ) @@ -312,6 +313,9 @@ func (dbContractSector) TableName() string { return "contract_sectors" } // TableName implements the gorm.Tabler interface. func (dbContractSet) TableName() string { return "contract_sets" } +// TableName implements the gorm.Tabler interface. +func (dbDirectory) TableName() string { return "directories" } + // TableName implements the gorm.Tabler interface. func (dbObject) TableName() string { return "objects" } @@ -426,7 +430,7 @@ func (s dbSlab) convert() (slab object.Slab, err error) { func (raw rawObjectMetadata) convert() api.ObjectMetadata { return newObjectMetadata( - raw.Name, + raw.ObjectName, raw.ETag, raw.MimeType, raw.Health, @@ -1224,64 +1228,77 @@ func (s *SQLStore) ObjectEntries(ctx context.Context, bucket, path, prefix, sort offset = 0 } - indexHint := "" - if !isSQLite(s.db) { - indexHint = "USE INDEX (idx_object_bucket, idx_objects_created_at)" - } + // indexHint := "" + // if !isSQLite(s.db) { + // indexHint = "USE INDEX (idx_object_bucket, idx_objects_created_at)" + // } - onameExpr := fmt.Sprintf("CASE INSTR(SUBSTR(object_id, ?), '/') WHEN 0 THEN %s ELSE %s END", - sqlConcat(s.db, "?", "SUBSTR(object_id, ?)"), - sqlConcat(s.db, "?", "substr(SUBSTR(object_id, ?), 1, INSTR(SUBSTR(object_id, ?), '/'))"), - ) + // onameExpr := fmt.Sprintf("CASE INSTR(SUBSTR(object_id, ?), '/') WHEN 0 THEN %s ELSE %s END", + // sqlConcat(s.db, "?", "SUBSTR(object_id, ?)"), + 
// sqlConcat(s.db, "?", "substr(SUBSTR(object_id, ?), 1, INSTR(SUBSTR(object_id, ?), '/'))"), + // ) - // build objects query & parameters - objectsQuery := fmt.Sprintf(` -SELECT ETag, ModTime, oname as Name, Size, Health, MimeType -FROM ( - SELECT - ANY_VALUE(etag) AS ETag, - MAX(objects.created_at) AS ModTime, - %s AS oname, - SUM(size) AS Size, - MIN(health) as Health, - ANY_VALUE(mime_type) as MimeType - FROM objects %s - INNER JOIN buckets b ON objects.db_bucket_id = b.id - WHERE object_id LIKE ? AND SUBSTR(object_id, 1, ?) = ? AND b.name = ? AND SUBSTR(%s, 1, ?) = ? AND %s != ? - GROUP BY oname -) baseQuery -`, - onameExpr, - indexHint, - onameExpr, - onameExpr, - ) - - if isSQLite(s.db) { - objectsQuery = replaceAnyValue(objectsQuery) + dirID, err := s.dirID(ctx, s.db, path) + if err != nil { + return nil, false, err } + objectsQuery := ` +SELECT o.etag as ETag, o.created_at as ModTime, o.object_id as ObjectName, o.size as Size, o.health as Health, o.mime_type as MimeType +FROM objects o +INNER JOIN buckets b ON o.db_bucket_id = b.id +WHERE o.db_directory_id = ? AND b.name = ? + ` + + // // build objects query & parameters + // objectsQuery := fmt.Sprintf(` + //SELECT ETag, ModTime, oname as Name, Size, Health, MimeType + //FROM ( + // SELECT + // ANY_VALUE(etag) AS ETag, + // MAX(objects.created_at) AS ModTime, + // %s AS oname, + // SUM(size) AS Size, + // MIN(health) as Health, + // ANY_VALUE(mime_type) as MimeType + // FROM objects %s + // INNER JOIN buckets b ON objects.db_bucket_id = b.id + // WHERE object_id LIKE ? AND SUBSTR(object_id, 1, ?) = ? AND b.name = ? AND SUBSTR(%s, 1, ?) = ? AND %s != ? 
+ // GROUP BY oname + //) baseQuery + //`, + // onameExpr, + // indexHint, + // onameExpr, + // onameExpr, + // ) + + // if isSQLite(s.db) { + // objectsQuery = replaceAnyValue(objectsQuery) + // } + objectsQueryParams := []interface{}{ - utf8.RuneCountInString(path) + 1, // onameExpr - path, utf8.RuneCountInString(path) + 1, // onameExpr - path, utf8.RuneCountInString(path) + 1, utf8.RuneCountInString(path) + 1, // onameExpr + dirID, bucket, + //utf8.RuneCountInString(path) + 1, // onameExpr + //path, utf8.RuneCountInString(path) + 1, // onameExpr + //path, utf8.RuneCountInString(path) + 1, utf8.RuneCountInString(path) + 1, // onameExpr - path + "%", + //path + "%", - utf8.RuneCountInString(path), // WHERE SUBSTR(%s, 1, ?) = ? AND %s != ? AND b.name = ? - path, // WHERE SUBSTR(%s, 1, ?) = ? AND %s != ? AND b.name = ? - bucket, // WHERE SUBSTR(%s, 1, ?) = ? AND %s != ? AND b.name = ? + //utf8.RuneCountInString(path), // WHERE SUBSTR(%s, 1, ?) = ? AND %s != ? AND b.name = ? + //path, // WHERE SUBSTR(%s, 1, ?) = ? AND %s != ? AND b.name = ? + //bucket, // WHERE SUBSTR(%s, 1, ?) = ? AND %s != ? AND b.name = ? - utf8.RuneCountInString(path) + 1, // onameExpr - path, utf8.RuneCountInString(path) + 1, // onameExpr - path, utf8.RuneCountInString(path) + 1, utf8.RuneCountInString(path) + 1, // onameExpr + //utf8.RuneCountInString(path) + 1, // onameExpr + //path, utf8.RuneCountInString(path) + 1, // onameExpr + //path, utf8.RuneCountInString(path) + 1, utf8.RuneCountInString(path) + 1, // onameExpr - utf8.RuneCountInString(path + prefix), // WHERE SUBSTR(%s, 1, ?) = ? AND %s != ? AND b.name = ? - path + prefix, // WHERE SUBSTR(%s, 1, ?) = ? AND %s != ? AND b.name = ? - utf8.RuneCountInString(path) + 1, // onameExpr - path, utf8.RuneCountInString(path) + 1, // onameExpr - path, utf8.RuneCountInString(path) + 1, utf8.RuneCountInString(path) + 1, // onameExpr - path, // WHERE SUBSTR(%s, 1, ?) = ? AND %s != ? AND b.name = ? 
+ //utf8.RuneCountInString(path + prefix), // WHERE SUBSTR(%s, 1, ?) = ? AND %s != ? AND b.name = ? + //path + prefix, // WHERE SUBSTR(%s, 1, ?) = ? AND %s != ? AND b.name = ? + //utf8.RuneCountInString(path) + 1, // onameExpr + //path, utf8.RuneCountInString(path) + 1, // onameExpr + //path, utf8.RuneCountInString(path) + 1, utf8.RuneCountInString(path) + 1, // onameExpr + //path, // WHERE SUBSTR(%s, 1, ?) = ? AND %s != ? AND b.name = ? } // build marker expr @@ -1300,10 +1317,10 @@ FROM ( } if sortDir == api.ObjectSortDirAsc { - markerExpr = "(Health > ? OR (Health = ? AND Name > ?))" + markerExpr = "(Health > ? OR (Health = ? AND ObjectName > ?))" markerParams = []interface{}{markerHealth, markerHealth, marker} } else { - markerExpr = "(Health = ? AND Name > ?) OR Health < ?" + markerExpr = "(Health = ? AND ObjectName > ?) OR Health < ?" markerParams = []interface{}{markerHealth, marker, markerHealth} } case api.ObjectSortBySize: @@ -1317,17 +1334,17 @@ FROM ( } if sortDir == api.ObjectSortDirAsc { - markerExpr = "(Size > ? OR (Size = ? AND Name > ?))" + markerExpr = "(Size > ? OR (Size = ? AND ObjectName > ?))" markerParams = []interface{}{markerSize, markerSize, marker} } else { - markerExpr = "(Size = ? AND Name > ?) OR Size < ?" + markerExpr = "(Size = ? AND ObjectName > ?) OR Size < ?" markerParams = []interface{}{markerSize, marker, markerSize} } case api.ObjectSortByName: if sortDir == api.ObjectSortDirAsc { - markerExpr = "Name > ?" + markerExpr = "ObjectName > ?" } else { - markerExpr = "Name < ?" + markerExpr = "ObjectName < ?" 
} markerParams = []interface{}{marker} default: @@ -1338,7 +1355,7 @@ FROM ( // build order clause orderByClause := fmt.Sprintf("%s %s", sortBy, sortDir) if sortBy != api.ObjectSortByName { - orderByClause += ", Name" + orderByClause += ", ObjectName" } var rows []rawObjectMetadata @@ -1357,6 +1374,27 @@ FROM ( return } + // fetch directories + var childDirs []dbDirectory + if err := s.db.Find(&childDirs, "parent_id = ?", dirID).Error; err != nil { + return nil, false, err + } + var dirRows []rawObjectMetadata + err = s.db.Raw(` +SELECT ? || d.name || '/' as ObjectName, MAX(o.created_at) as ModTime, SUM(o.size) as Size, MIN(o.health) as Health +FROM objects o +INNER JOIN buckets b ON o.db_bucket_id = b.id +INNER JOIN directories d ON SUBSTR(o.object_id, 1, LENGTH(ObjectName)) = ObjectName +WHERE b.name = ? AND d.parent_id = ? +GROUP BY d.id +ORDER BY ObjectName ASC + `, path, bucket, dirID).Scan(&dirRows).Error + if err != nil { + return nil, false, err + } + //dirRow.Name = fmt.Sprintf("%s%s/", path, dir.Name) + rows = append(rows, dirRows...) + // trim last element if we have more if len(rows) == limit { hasMore = true @@ -1736,17 +1774,58 @@ func (s *SQLStore) DeleteHostSector(ctx context.Context, hk types.PublicKey, roo return deletedSectors, err } -func (s *SQLStore) makeDirsForPath(ctx context.Context, tx *gorm.DB, path string) error { - // Create all directories. 
- for i := 1; i < len(path); i++ { - if path[i] == '/' { - dir := path[:i+1] - if err := s.db.Create(dbDirectory{}).Error; err != nil { - return fmt.Errorf("failed to create directory %v: %w", dir, err) - } +func (s *SQLStore) dirID(ctx context.Context, tx *gorm.DB, dirPath string) (uint, error) { + if !strings.HasPrefix(dirPath, "/") { + return 0, fmt.Errorf("path must start with /") + } else if !strings.HasSuffix(dirPath, "/") { + return 0, fmt.Errorf("path must end with /") + } + + dirID := uint(1) + if dirPath == "/" { + return dirID, nil // root dir returned + } + + splitPath := strings.Split(dirPath[1:len(dirPath)-1], "/") + for _, dir := range splitPath[:len(splitPath)-1] { + if err := tx.Raw("SELECT id FROM directories WHERE name = ? AND parent_id = ?", dir, dirID). + Scan(&dirID).Error; err != nil { + return 0, fmt.Errorf("failed to fetch root directory: %w", err) } + fmt.Println("dirID", dirID) } - return nil + return dirID, nil +} + +func (s *SQLStore) makeDirsForPath(ctx context.Context, tx *gorm.DB, path string) (uint, error) { + if !strings.HasPrefix(path, "/") { + return 0, fmt.Errorf("path must start with /") + } + + // Create root dir. + if err := tx.Exec("INSERT INTO directories (id) VALUES (1) ON CONFLICT DO NOTHING").Error; err != nil { + return 0, fmt.Errorf("failed to create root directory: %w", err) + } + dirID := uint(1) + + // Create remaining directories. + splitPath := strings.Split(path[1:], "/") + for _, dir := range splitPath[:len(splitPath)-1] { + fmt.Println("creating", dir) + dbDir := dbDirectory{ + Name: dir, + ParentID: dirID, + } + if err := tx.Clauses(clause.OnConflict{ + Columns: []clause.Column{{Name: "name"}, {Name: "parent_id"}}, + UpdateAll: true, + }). 
+ Create(&dbDir).Error; err != nil { + return 0, fmt.Errorf("failed to create directory %v: %w", dir, err) + } + dirID = dbDir.ID + } + return dirID, nil } func (s *SQLStore) UpdateObject(ctx context.Context, bucket, path, contractSet, eTag, mimeType string, metadata api.ObjectUserMetadata, o object.Object) error { @@ -1780,6 +1859,12 @@ func (s *SQLStore) UpdateObject(ctx context.Context, bucket, path, contractSet, return fmt.Errorf("UpdateObject: failed to delete object: %w", err) } + // create the dir + dirID, err := s.makeDirsForPath(ctx, tx, path) + if err != nil { + return fmt.Errorf("failed to create directories: %w", err) + } + // Insert a new object. objKey, err := o.Key.MarshalBinary() if err != nil { @@ -1796,12 +1881,13 @@ func (s *SQLStore) UpdateObject(ctx context.Context, bucket, path, contractSet, } obj := dbObject{ - DBBucketID: bucketID, - ObjectID: path, - Key: objKey, - Size: o.TotalSize(), - MimeType: mimeType, - Etag: eTag, + DBDirectoryID: dirID, + DBBucketID: bucketID, + ObjectID: path, + Key: objKey, + Size: o.TotalSize(), + MimeType: mimeType, + Etag: eTag, } err = tx.Create(&obj).Error if err != nil { @@ -2958,14 +3044,14 @@ func (s *SQLStore) ListObjects(ctx context.Context, bucket, prefix, sortBy, sort } var rows []rawObjectMetadata if err := s.db. - Select("o.object_id as Name, o.size as Size, o.health as Health, o.mime_type as MimeType, o.created_at as ModTime, o.etag as ETag"). + Select("o.object_id as ObjectName, o.size as Size, o.health as Health, o.mime_type as MimeType, o.created_at as ModTime, o.etag as ETag"). Model(&dbObject{}). Table("objects o"). Joins("INNER JOIN buckets b ON o.db_bucket_id = b.id"). Where("b.name = ? AND ? AND ?", bucket, prefixExpr, markerExpr). Order(orderBy). Order(markerOrderBy). - Order("Name ASC"). + Order("ObjectName ASC"). Limit(int(limit)). 
Scan(&rows).Error; err != nil { return api.ObjectsListResponse{}, err @@ -2976,7 +3062,7 @@ func (s *SQLStore) ListObjects(ctx context.Context, bucket, prefix, sortBy, sort if len(rows) == limit { hasMore = true rows = rows[:len(rows)-1] - nextMarker = rows[len(rows)-1].Name + nextMarker = rows[len(rows)-1].ObjectName } var objects []api.ObjectMetadata diff --git a/stores/metadata_test.go b/stores/metadata_test.go index aa12e3fb8..97c0c2b3f 100644 --- a/stores/metadata_test.go +++ b/stores/metadata_test.go @@ -1443,9 +1443,16 @@ func TestObjectEntries(t *testing.T) { } } + var dirs []dbDirectory + ss.db.Find(&dirs) + for i, d := range dirs { + fmt.Println(i, d.ID, d.Name, d.ParentID) + } + // assertMetadata asserts both ModTime, MimeType and ETag and clears them so the // entries are ready for comparison assertMetadata := func(entries []api.ObjectMetadata) { + t.Helper() for i := range entries { // assert mod time if !strings.HasSuffix(entries[i].Name, "/") && entries[i].ModTime.IsZero() { @@ -1454,14 +1461,15 @@ func TestObjectEntries(t *testing.T) { entries[i].ModTime = api.TimeRFC3339{} // assert mime type - if entries[i].MimeType != testMimeType { + isDir := strings.HasPrefix(entries[i].Name, "/") + if (isDir && entries[i].MimeType != "") || (!isDir && entries[i].MimeType != testMimeType) { t.Fatal("unexpected mime type", entries[i].MimeType) } entries[i].MimeType = "" // assert etag - if entries[i].ETag == "" { - t.Fatal("etag should be set") + if isDir != (entries[i].ETag == "") { + t.Fatal("etag should be set for files and empty for dirs") } entries[i].ETag = "" } @@ -1488,25 +1496,25 @@ func TestObjectEntries(t *testing.T) { want []api.ObjectMetadata }{ {"/", "", "", "", []api.ObjectMetadata{{Name: "/FOO/", Size: 7, Health: 1}, {Name: "/fileś/", Size: 6, Health: 1}, {Name: "/foo/", Size: 10, Health: .5}, {Name: "/gab/", Size: 5, Health: 1}}}, - {"/foo/", "", "", "", []api.ObjectMetadata{{Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/foo/bat", Size: 2, 
Health: 1}, {Name: "/foo/baz/", Size: 7, Health: .5}}}, - {"/foo/baz/", "", "", "", []api.ObjectMetadata{{Name: "/foo/baz/quux", Size: 3, Health: .75}, {Name: "/foo/baz/quuz", Size: 4, Health: .5}}}, - {"/gab/", "", "", "", []api.ObjectMetadata{{Name: "/gab/guub", Size: 5, Health: 1}}}, - {"/fileś/", "", "", "", []api.ObjectMetadata{{Name: "/fileś/śpecial", Size: 6, Health: 1}}}, - - {"/", "f", "", "", []api.ObjectMetadata{{Name: "/fileś/", Size: 6, Health: 1}, {Name: "/foo/", Size: 10, Health: .5}}}, - {"/", "F", "", "", []api.ObjectMetadata{{Name: "/FOO/", Size: 7, Health: 1}}}, - {"/foo/", "fo", "", "", []api.ObjectMetadata{}}, - {"/foo/baz/", "quux", "", "", []api.ObjectMetadata{{Name: "/foo/baz/quux", Size: 3, Health: .75}}}, - {"/gab/", "/guub", "", "", []api.ObjectMetadata{}}, - - {"/", "", "name", "ASC", []api.ObjectMetadata{{Name: "/FOO/", Size: 7, Health: 1}, {Name: "/fileś/", Size: 6, Health: 1}, {Name: "/foo/", Size: 10, Health: .5}, {Name: "/gab/", Size: 5, Health: 1}}}, - {"/", "", "name", "DESC", []api.ObjectMetadata{{Name: "/gab/", Size: 5, Health: 1}, {Name: "/foo/", Size: 10, Health: .5}, {Name: "/fileś/", Size: 6, Health: 1}, {Name: "/FOO/", Size: 7, Health: 1}}}, - - {"/", "", "health", "ASC", []api.ObjectMetadata{{Name: "/foo/", Size: 10, Health: .5}, {Name: "/FOO/", Size: 7, Health: 1}, {Name: "/fileś/", Size: 6, Health: 1}, {Name: "/gab/", Size: 5, Health: 1}}}, - {"/", "", "health", "DESC", []api.ObjectMetadata{{Name: "/FOO/", Size: 7, Health: 1}, {Name: "/fileś/", Size: 6, Health: 1}, {Name: "/gab/", Size: 5, Health: 1}, {Name: "/foo/", Size: 10, Health: .5}}}, - - {"/", "", "size", "DESC", []api.ObjectMetadata{{Name: "/foo/", Size: 10, Health: .5}, {Name: "/FOO/", Size: 7, Health: 1}, {Name: "/fileś/", Size: 6, Health: 1}, {Name: "/gab/", Size: 5, Health: 1}}}, - {"/", "", "size", "ASC", []api.ObjectMetadata{{Name: "/gab/", Size: 5, Health: 1}, {Name: "/fileś/", Size: 6, Health: 1}, {Name: "/FOO/", Size: 7, Health: 1}, {Name: "/foo/", 
Size: 10, Health: .5}}}, + // {"/foo/", "", "", "", []api.ObjectMetadata{{Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/baz/", Size: 7, Health: .5}}}, + // {"/foo/baz/", "", "", "", []api.ObjectMetadata{{Name: "/foo/baz/quux", Size: 3, Health: .75}, {Name: "/foo/baz/quuz", Size: 4, Health: .5}}}, + // {"/gab/", "", "", "", []api.ObjectMetadata{{Name: "/gab/guub", Size: 5, Health: 1}}}, + // {"/fileś/", "", "", "", []api.ObjectMetadata{{Name: "/fileś/śpecial", Size: 6, Health: 1}}}, + // + // {"/", "f", "", "", []api.ObjectMetadata{{Name: "/fileś/", Size: 6, Health: 1}, {Name: "/foo/", Size: 10, Health: .5}}}, + // {"/", "F", "", "", []api.ObjectMetadata{{Name: "/FOO/", Size: 7, Health: 1}}}, + // {"/foo/", "fo", "", "", []api.ObjectMetadata{}}, + // {"/foo/baz/", "quux", "", "", []api.ObjectMetadata{{Name: "/foo/baz/quux", Size: 3, Health: .75}}}, + // {"/gab/", "/guub", "", "", []api.ObjectMetadata{}}, + // + // {"/", "", "name", "ASC", []api.ObjectMetadata{{Name: "/FOO/", Size: 7, Health: 1}, {Name: "/fileś/", Size: 6, Health: 1}, {Name: "/foo/", Size: 10, Health: .5}, {Name: "/gab/", Size: 5, Health: 1}}}, + // {"/", "", "name", "DESC", []api.ObjectMetadata{{Name: "/gab/", Size: 5, Health: 1}, {Name: "/foo/", Size: 10, Health: .5}, {Name: "/fileś/", Size: 6, Health: 1}, {Name: "/FOO/", Size: 7, Health: 1}}}, + // + // {"/", "", "health", "ASC", []api.ObjectMetadata{{Name: "/foo/", Size: 10, Health: .5}, {Name: "/FOO/", Size: 7, Health: 1}, {Name: "/fileś/", Size: 6, Health: 1}, {Name: "/gab/", Size: 5, Health: 1}}}, + // {"/", "", "health", "DESC", []api.ObjectMetadata{{Name: "/FOO/", Size: 7, Health: 1}, {Name: "/fileś/", Size: 6, Health: 1}, {Name: "/gab/", Size: 5, Health: 1}, {Name: "/foo/", Size: 10, Health: .5}}}, + // + // {"/", "", "size", "DESC", []api.ObjectMetadata{{Name: "/foo/", Size: 10, Health: .5}, {Name: "/FOO/", Size: 7, Health: 1}, {Name: "/fileś/", Size: 6, Health: 1}, {Name: "/gab/", Size: 5, 
Health: 1}}}, + // {"/", "", "size", "ASC", []api.ObjectMetadata{{Name: "/gab/", Size: 5, Health: 1}, {Name: "/fileś/", Size: 6, Health: 1}, {Name: "/FOO/", Size: 7, Health: 1}, {Name: "/foo/", Size: 10, Health: .5}}}, } for _, test := range tests { got, _, err := ss.ObjectEntries(ctx, api.DefaultBucketName, test.path, test.prefix, test.sortBy, test.sortDir, "", 0, -1) diff --git a/stores/migrations/mysql/main/schema.sql b/stores/migrations/mysql/main/schema.sql index 75f5d72cf..ae8c02657 100644 --- a/stores/migrations/mysql/main/schema.sql +++ b/stores/migrations/mysql/main/schema.sql @@ -316,7 +316,7 @@ CREATE TABLE `multipart_parts` ( -- dbDirectory CREATE TABLE `directories` ( `id` bigint unsigned NOT NULL AUTO_INCREMENT, - `parent_id` bigint unsigned NOT NULL, + `parent_id` bigint unsigned, `name` varchar(766) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL, KEY `idx_directories_parent_id` (`parent_id`), UNIQUE KEY `idx_directories_name` (`name`), diff --git a/stores/migrations/sqlite/main/schema.sql b/stores/migrations/sqlite/main/schema.sql index 31ccd4c9d..b57a600d1 100644 --- a/stores/migrations/sqlite/main/schema.sql +++ b/stores/migrations/sqlite/main/schema.sql @@ -45,12 +45,12 @@ CREATE TABLE `buckets` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` date CREATE INDEX `idx_buckets_name` ON `buckets`(`name`); -- dbDirectory -CREATE TABLE `objects` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`parent_id` integer NOT NULL,`name` text, CONSTRAINT `fk_directories_db_directories` FOREIGN KEY (`parent_id`) REFERENCES `directories`(`id`)); +CREATE TABLE `directories` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`parent_id` integer,`name` text, CONSTRAINT `fk_directories_db_directories` FOREIGN KEY (`parent_id`) REFERENCES `directories`(`id`)); CREATE INDEX `idx_directories_parent_id` ON `directories`(`parent_id`); -CREATE UNIQUE INDEX `idx_directories_name` ON `directories`(`name`); +CREATE UNIQUE INDEX 
`idx_directories_name_parent_id` ON `directories`(`name`, `parent_id`); -- dbObject -CREATE TABLE `objects` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`db_bucket_id` integer NOT NULL, `db_directory_id` integer NOT NULL, `object_id` text,`key` blob,`health` real NOT NULL DEFAULT 1,`size` integer,`mime_type` text,`etag` text,CONSTRAINT `fk_objects_db_bucket` FOREIGN KEY (`db_bucket_id`) REFERENCES `buckets`(`id`),CONSTRAINT `fk_objects_db_directories` FOREIGN KEY (`db_directory_id`) REFERENCES `directories`(`id`); +CREATE TABLE `objects` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`db_bucket_id` integer NOT NULL, `db_directory_id` integer, `object_id` text,`key` blob,`health` real NOT NULL DEFAULT 1,`size` integer,`mime_type` text,`etag` text,CONSTRAINT `fk_objects_db_bucket` FOREIGN KEY (`db_bucket_id`) REFERENCES `buckets`(`id`),CONSTRAINT `fk_objects_db_directories` FOREIGN KEY (`db_directory_id`) REFERENCES `directories`(`id`)); CREATE INDEX `idx_objects_db_bucket_id` ON `objects`(`db_bucket_id`); CREATE INDEX `idx_objects_etag` ON `objects`(`etag`); CREATE INDEX `idx_objects_health` ON `objects`(`health`); From 8ca62432a61733cbfa266a0f7f6c999f83fcb4fc Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 24 Apr 2024 16:12:17 +0200 Subject: [PATCH 03/52] stores: prefix is working in ObjectEntries --- stores/metadata.go | 78 ++++++++++++++++++++++++++++------------- stores/metadata_test.go | 40 ++++++++++----------- 2 files changed, 74 insertions(+), 44 deletions(-) diff --git a/stores/metadata.go b/stores/metadata.go index 418411fe2..ac470232c 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -1243,12 +1243,24 @@ func (s *SQLStore) ObjectEntries(ctx context.Context, bucket, path, prefix, sort return nil, false, err } - objectsQuery := ` + prefixExpr := "TRUE" + if len(prefix) > 0 { + prefixExpr = "SUBSTR(o.object_id, 1, ?) = ?" 
+ } + + objectsQuery := fmt.Sprintf(` SELECT o.etag as ETag, o.created_at as ModTime, o.object_id as ObjectName, o.size as Size, o.health as Health, o.mime_type as MimeType FROM objects o INNER JOIN buckets b ON o.db_bucket_id = b.id -WHERE o.db_directory_id = ? AND b.name = ? - ` +WHERE o.db_directory_id = ? AND b.name = ? AND %s +UNION +SELECT '' as ETag, MAX(o.created_at) as ModTime, ? || d.name || '/' as ObjectName, SUM(o.size) as Size, MIN(o.health) as Health, '' as MimeType +FROM objects o +INNER JOIN buckets b ON o.db_bucket_id = b.id +INNER JOIN directories d ON SUBSTR(o.object_id, 1, LENGTH(ObjectName)) = ObjectName AND %s +WHERE b.name = ? AND d.parent_id = ? +GROUP BY d.id + `, prefixExpr, prefixExpr) // // build objects query & parameters // objectsQuery := fmt.Sprintf(` @@ -1278,7 +1290,7 @@ WHERE o.db_directory_id = ? AND b.name = ? // } objectsQueryParams := []interface{}{ - dirID, bucket, + //utf8.RuneCountInString(prefix), path + prefix, //utf8.RuneCountInString(path) + 1, // onameExpr //path, utf8.RuneCountInString(path) + 1, // onameExpr //path, utf8.RuneCountInString(path) + 1, utf8.RuneCountInString(path) + 1, // onameExpr @@ -1300,6 +1312,21 @@ WHERE o.db_directory_id = ? AND b.name = ? //path, utf8.RuneCountInString(path) + 1, utf8.RuneCountInString(path) + 1, // onameExpr //path, // WHERE SUBSTR(%s, 1, ?) = ? AND %s != ? AND b.name = ? } + if len(prefix) > 0 { + objectsQueryParams = append(objectsQueryParams, []interface{}{ + dirID, bucket, + utf8.RuneCountInString(path + prefix), path + prefix, + path, + utf8.RuneCountInString(path + prefix), path + prefix, + bucket, dirID, + }...) + } else { + objectsQueryParams = append(objectsQueryParams, []interface{}{ + dirID, bucket, + path, + bucket, dirID, + }...) + } // build marker expr markerExpr := "1 = 1" @@ -1353,6 +1380,9 @@ WHERE o.db_directory_id = ? AND b.name = ? 
} // build order clause + if sortBy == api.ObjectSortByName { + sortBy = "ObjectName" + } orderByClause := fmt.Sprintf("%s %s", sortBy, sortDir) if sortBy != api.ObjectSortByName { orderByClause += ", ObjectName" @@ -1375,25 +1405,25 @@ WHERE o.db_directory_id = ? AND b.name = ? } // fetch directories - var childDirs []dbDirectory - if err := s.db.Find(&childDirs, "parent_id = ?", dirID).Error; err != nil { - return nil, false, err - } - var dirRows []rawObjectMetadata - err = s.db.Raw(` -SELECT ? || d.name || '/' as ObjectName, MAX(o.created_at) as ModTime, SUM(o.size) as Size, MIN(o.health) as Health -FROM objects o -INNER JOIN buckets b ON o.db_bucket_id = b.id -INNER JOIN directories d ON SUBSTR(o.object_id, 1, LENGTH(ObjectName)) = ObjectName -WHERE b.name = ? AND d.parent_id = ? -GROUP BY d.id -ORDER BY ObjectName ASC - `, path, bucket, dirID).Scan(&dirRows).Error - if err != nil { - return nil, false, err - } - //dirRow.Name = fmt.Sprintf("%s%s/", path, dir.Name) - rows = append(rows, dirRows...) + // var childDirs []dbDirectory + // if err := s.db.Find(&childDirs, "parent_id = ?", dirID).Error; err != nil { + // return nil, false, err + // } + // var dirRows []rawObjectMetadata + // err = s.db.Raw(` + //SELECT ? || d.name || '/' as ObjectName, MAX(o.created_at) as ModTime, SUM(o.size) as Size, MIN(o.health) as Health + //FROM objects o + //INNER JOIN buckets b ON o.db_bucket_id = b.id + //INNER JOIN directories d ON SUBSTR(o.object_id, 1, LENGTH(ObjectName)) = ObjectName + //WHERE b.name = ? AND d.parent_id = ? + //GROUP BY d.id + //ORDER BY ObjectName ASC + // `, path, bucket, dirID).Scan(&dirRows).Error + // if err != nil { + // return nil, false, err + // } + // //dirRow.Name = fmt.Sprintf("%s%s/", path, dir.Name) + // rows = append(rows, dirRows...) 
// trim last element if we have more if len(rows) == limit { @@ -1787,7 +1817,7 @@ func (s *SQLStore) dirID(ctx context.Context, tx *gorm.DB, dirPath string) (uint } splitPath := strings.Split(dirPath[1:len(dirPath)-1], "/") - for _, dir := range splitPath[:len(splitPath)-1] { + for _, dir := range splitPath { if err := tx.Raw("SELECT id FROM directories WHERE name = ? AND parent_id = ?", dir, dirID). Scan(&dirID).Error; err != nil { return 0, fmt.Errorf("failed to fetch root directory: %w", err) diff --git a/stores/metadata_test.go b/stores/metadata_test.go index 97c0c2b3f..d7a1a7c0f 100644 --- a/stores/metadata_test.go +++ b/stores/metadata_test.go @@ -1461,7 +1461,7 @@ func TestObjectEntries(t *testing.T) { entries[i].ModTime = api.TimeRFC3339{} // assert mime type - isDir := strings.HasPrefix(entries[i].Name, "/") + isDir := strings.HasSuffix(entries[i].Name, "/") if (isDir && entries[i].MimeType != "") || (!isDir && entries[i].MimeType != testMimeType) { t.Fatal("unexpected mime type", entries[i].MimeType) } @@ -1496,25 +1496,25 @@ func TestObjectEntries(t *testing.T) { want []api.ObjectMetadata }{ {"/", "", "", "", []api.ObjectMetadata{{Name: "/FOO/", Size: 7, Health: 1}, {Name: "/fileś/", Size: 6, Health: 1}, {Name: "/foo/", Size: 10, Health: .5}, {Name: "/gab/", Size: 5, Health: 1}}}, - // {"/foo/", "", "", "", []api.ObjectMetadata{{Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/baz/", Size: 7, Health: .5}}}, - // {"/foo/baz/", "", "", "", []api.ObjectMetadata{{Name: "/foo/baz/quux", Size: 3, Health: .75}, {Name: "/foo/baz/quuz", Size: 4, Health: .5}}}, - // {"/gab/", "", "", "", []api.ObjectMetadata{{Name: "/gab/guub", Size: 5, Health: 1}}}, - // {"/fileś/", "", "", "", []api.ObjectMetadata{{Name: "/fileś/śpecial", Size: 6, Health: 1}}}, - // - // {"/", "f", "", "", []api.ObjectMetadata{{Name: "/fileś/", Size: 6, Health: 1}, {Name: "/foo/", Size: 10, Health: .5}}}, - // {"/", "F", "", "", 
[]api.ObjectMetadata{{Name: "/FOO/", Size: 7, Health: 1}}}, - // {"/foo/", "fo", "", "", []api.ObjectMetadata{}}, - // {"/foo/baz/", "quux", "", "", []api.ObjectMetadata{{Name: "/foo/baz/quux", Size: 3, Health: .75}}}, - // {"/gab/", "/guub", "", "", []api.ObjectMetadata{}}, - // - // {"/", "", "name", "ASC", []api.ObjectMetadata{{Name: "/FOO/", Size: 7, Health: 1}, {Name: "/fileś/", Size: 6, Health: 1}, {Name: "/foo/", Size: 10, Health: .5}, {Name: "/gab/", Size: 5, Health: 1}}}, - // {"/", "", "name", "DESC", []api.ObjectMetadata{{Name: "/gab/", Size: 5, Health: 1}, {Name: "/foo/", Size: 10, Health: .5}, {Name: "/fileś/", Size: 6, Health: 1}, {Name: "/FOO/", Size: 7, Health: 1}}}, - // - // {"/", "", "health", "ASC", []api.ObjectMetadata{{Name: "/foo/", Size: 10, Health: .5}, {Name: "/FOO/", Size: 7, Health: 1}, {Name: "/fileś/", Size: 6, Health: 1}, {Name: "/gab/", Size: 5, Health: 1}}}, - // {"/", "", "health", "DESC", []api.ObjectMetadata{{Name: "/FOO/", Size: 7, Health: 1}, {Name: "/fileś/", Size: 6, Health: 1}, {Name: "/gab/", Size: 5, Health: 1}, {Name: "/foo/", Size: 10, Health: .5}}}, - // - // {"/", "", "size", "DESC", []api.ObjectMetadata{{Name: "/foo/", Size: 10, Health: .5}, {Name: "/FOO/", Size: 7, Health: 1}, {Name: "/fileś/", Size: 6, Health: 1}, {Name: "/gab/", Size: 5, Health: 1}}}, - // {"/", "", "size", "ASC", []api.ObjectMetadata{{Name: "/gab/", Size: 5, Health: 1}, {Name: "/fileś/", Size: 6, Health: 1}, {Name: "/FOO/", Size: 7, Health: 1}, {Name: "/foo/", Size: 10, Health: .5}}}, + {"/foo/", "", "", "", []api.ObjectMetadata{{Name: "/foo/bar", Size: 1, Health: 1}, {Name: "/foo/bat", Size: 2, Health: 1}, {Name: "/foo/baz/", Size: 7, Health: .5}}}, + {"/foo/baz/", "", "", "", []api.ObjectMetadata{{Name: "/foo/baz/quux", Size: 3, Health: .75}, {Name: "/foo/baz/quuz", Size: 4, Health: .5}}}, + {"/gab/", "", "", "", []api.ObjectMetadata{{Name: "/gab/guub", Size: 5, Health: 1}}}, + {"/fileś/", "", "", "", []api.ObjectMetadata{{Name: 
"/fileś/śpecial", Size: 6, Health: 1}}}, + + {"/", "f", "", "", []api.ObjectMetadata{{Name: "/fileś/", Size: 6, Health: 1}, {Name: "/foo/", Size: 10, Health: .5}}}, + {"/", "F", "", "", []api.ObjectMetadata{{Name: "/FOO/", Size: 7, Health: 1}}}, + {"/foo/", "fo", "", "", []api.ObjectMetadata{}}, + {"/foo/baz/", "quux", "", "", []api.ObjectMetadata{{Name: "/foo/baz/quux", Size: 3, Health: .75}}}, + {"/gab/", "/guub", "", "", []api.ObjectMetadata{}}, + + {"/", "", "name", "ASC", []api.ObjectMetadata{{Name: "/FOO/", Size: 7, Health: 1}, {Name: "/fileś/", Size: 6, Health: 1}, {Name: "/foo/", Size: 10, Health: .5}, {Name: "/gab/", Size: 5, Health: 1}}}, + {"/", "", "name", "DESC", []api.ObjectMetadata{{Name: "/gab/", Size: 5, Health: 1}, {Name: "/foo/", Size: 10, Health: .5}, {Name: "/fileś/", Size: 6, Health: 1}, {Name: "/FOO/", Size: 7, Health: 1}}}, + + // {"/", "", "health", "ASC", []api.ObjectMetadata{{Name: "/foo/", Size: 10, Health: .5}, {Name: "/FOO/", Size: 7, Health: 1}, {Name: "/fileś/", Size: 6, Health: 1}, {Name: "/gab/", Size: 5, Health: 1}}}, + // {"/", "", "health", "DESC", []api.ObjectMetadata{{Name: "/FOO/", Size: 7, Health: 1}, {Name: "/fileś/", Size: 6, Health: 1}, {Name: "/gab/", Size: 5, Health: 1}, {Name: "/foo/", Size: 10, Health: .5}}}, + + // {"/", "", "size", "DESC", []api.ObjectMetadata{{Name: "/foo/", Size: 10, Health: .5}, {Name: "/FOO/", Size: 7, Health: 1}, {Name: "/fileś/", Size: 6, Health: 1}, {Name: "/gab/", Size: 5, Health: 1}}}, + // {"/", "", "size", "ASC", []api.ObjectMetadata{{Name: "/gab/", Size: 5, Health: 1}, {Name: "/fileś/", Size: 6, Health: 1}, {Name: "/FOO/", Size: 7, Health: 1}, {Name: "/foo/", Size: 10, Health: .5}}}, } for _, test := range tests { got, _, err := ss.ObjectEntries(ctx, api.DefaultBucketName, test.path, test.prefix, test.sortBy, test.sortDir, "", 0, -1) From 94e8d0dad4cdf480f90ed9497cb57647a041ced6 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 24 Apr 2024 16:26:06 +0200 Subject: [PATCH 04/52] 
stores: TestObjectEntries passing again --- stores/metadata.go | 118 +++++++--------------------------------- stores/metadata_test.go | 14 ++--- 2 files changed, 23 insertions(+), 109 deletions(-) diff --git a/stores/metadata.go b/stores/metadata.go index ac470232c..9906ab580 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -6,7 +6,6 @@ import ( "errors" "fmt" "math" - "regexp" "strings" "time" "unicode/utf8" @@ -1173,11 +1172,6 @@ func (s *SQLStore) SearchObjects(ctx context.Context, bucket, substring string, return objects, nil } -func replaceAnyValue(query string) string { - re := regexp.MustCompile(`ANY_VALUE\((.*?)\)`) - return re.ReplaceAllString(query, "$1") -} - func (s *SQLStore) ObjectEntries(ctx context.Context, bucket, path, prefix, sortBy, sortDir, marker string, offset, limit int) (metadata []api.ObjectMetadata, hasMore bool, err error) { // sanity check we are passing a directory if !strings.HasSuffix(path, "/") { @@ -1228,26 +1222,21 @@ func (s *SQLStore) ObjectEntries(ctx context.Context, bucket, path, prefix, sort offset = 0 } - // indexHint := "" - // if !isSQLite(s.db) { - // indexHint = "USE INDEX (idx_object_bucket, idx_objects_created_at)" - // } - - // onameExpr := fmt.Sprintf("CASE INSTR(SUBSTR(object_id, ?), '/') WHEN 0 THEN %s ELSE %s END", - // sqlConcat(s.db, "?", "SUBSTR(object_id, ?)"), - // sqlConcat(s.db, "?", "substr(SUBSTR(object_id, ?), 1, INSTR(SUBSTR(object_id, ?), '/'))"), - // ) - - dirID, err := s.dirID(ctx, s.db, path) + // fetch id of directory to query + dirID, err := s.dirID(s.db, path) if err != nil { return nil, false, err } + // build prefix expression prefixExpr := "TRUE" - if len(prefix) > 0 { + if prefix != "" { prefixExpr = "SUBSTR(o.object_id, 1, ?) = ?" } + // objectsQuery consists of 2 parts + // 1. fetch all objects in requested directory + // 2. 
fetch all sub-directories objectsQuery := fmt.Sprintf(` SELECT o.etag as ETag, o.created_at as ModTime, o.object_id as ObjectName, o.size as Size, o.health as Health, o.mime_type as MimeType FROM objects o @@ -1262,70 +1251,22 @@ WHERE b.name = ? AND d.parent_id = ? GROUP BY d.id `, prefixExpr, prefixExpr) - // // build objects query & parameters - // objectsQuery := fmt.Sprintf(` - //SELECT ETag, ModTime, oname as Name, Size, Health, MimeType - //FROM ( - // SELECT - // ANY_VALUE(etag) AS ETag, - // MAX(objects.created_at) AS ModTime, - // %s AS oname, - // SUM(size) AS Size, - // MIN(health) as Health, - // ANY_VALUE(mime_type) as MimeType - // FROM objects %s - // INNER JOIN buckets b ON objects.db_bucket_id = b.id - // WHERE object_id LIKE ? AND SUBSTR(object_id, 1, ?) = ? AND b.name = ? AND SUBSTR(%s, 1, ?) = ? AND %s != ? - // GROUP BY oname - //) baseQuery - //`, - // onameExpr, - // indexHint, - // onameExpr, - // onameExpr, - // ) - - // if isSQLite(s.db) { - // objectsQuery = replaceAnyValue(objectsQuery) - // } - - objectsQueryParams := []interface{}{ - //utf8.RuneCountInString(prefix), path + prefix, - //utf8.RuneCountInString(path) + 1, // onameExpr - //path, utf8.RuneCountInString(path) + 1, // onameExpr - //path, utf8.RuneCountInString(path) + 1, utf8.RuneCountInString(path) + 1, // onameExpr - - //path + "%", - - //utf8.RuneCountInString(path), // WHERE SUBSTR(%s, 1, ?) = ? AND %s != ? AND b.name = ? - //path, // WHERE SUBSTR(%s, 1, ?) = ? AND %s != ? AND b.name = ? - //bucket, // WHERE SUBSTR(%s, 1, ?) = ? AND %s != ? AND b.name = ? - - //utf8.RuneCountInString(path) + 1, // onameExpr - //path, utf8.RuneCountInString(path) + 1, // onameExpr - //path, utf8.RuneCountInString(path) + 1, utf8.RuneCountInString(path) + 1, // onameExpr - - //utf8.RuneCountInString(path + prefix), // WHERE SUBSTR(%s, 1, ?) = ? AND %s != ? AND b.name = ? - //path + prefix, // WHERE SUBSTR(%s, 1, ?) = ? AND %s != ? AND b.name = ? 
- //utf8.RuneCountInString(path) + 1, // onameExpr - //path, utf8.RuneCountInString(path) + 1, // onameExpr - //path, utf8.RuneCountInString(path) + 1, utf8.RuneCountInString(path) + 1, // onameExpr - //path, // WHERE SUBSTR(%s, 1, ?) = ? AND %s != ? AND b.name = ? - } - if len(prefix) > 0 { - objectsQueryParams = append(objectsQueryParams, []interface{}{ + // build query params + var objectsQueryParams []interface{} + if prefix != "" { + objectsQueryParams = []interface{}{ dirID, bucket, utf8.RuneCountInString(path + prefix), path + prefix, path, utf8.RuneCountInString(path + prefix), path + prefix, bucket, dirID, - }...) + } } else { - objectsQueryParams = append(objectsQueryParams, []interface{}{ + objectsQueryParams = []interface{}{ dirID, bucket, path, bucket, dirID, - }...) + } } // build marker expr @@ -1337,7 +1278,7 @@ GROUP BY d.id var markerHealth float64 if err = s.db. WithContext(ctx). - Raw(fmt.Sprintf(`SELECT Health FROM (%s WHERE oname >= ? ORDER BY oname LIMIT 1) as n`, objectsQuery), append(objectsQueryParams, marker)...). + Raw(fmt.Sprintf(`SELECT Health FROM (SELECT * FROM (%s) WHERE ObjectName >= ? ORDER BY ObjectName LIMIT 1) as n`, objectsQuery), append(objectsQueryParams, marker)...). Scan(&markerHealth). Error; err != nil { return @@ -1354,7 +1295,7 @@ GROUP BY d.id var markerSize float64 if err = s.db. WithContext(ctx). - Raw(fmt.Sprintf(`SELECT Size FROM (%s WHERE oname >= ? ORDER BY oname LIMIT 1) as n`, objectsQuery), append(objectsQueryParams, marker)...). + Raw(fmt.Sprintf(`SELECT Size FROM (SELECT * FROM (%s) WHERE ObjectName >= ? ORDER BY ObjectName LIMIT 1) as n`, objectsQuery), append(objectsQueryParams, marker)...). Scan(&markerSize). 
Error; err != nil { return @@ -1404,27 +1345,6 @@ GROUP BY d.id return } - // fetch directories - // var childDirs []dbDirectory - // if err := s.db.Find(&childDirs, "parent_id = ?", dirID).Error; err != nil { - // return nil, false, err - // } - // var dirRows []rawObjectMetadata - // err = s.db.Raw(` - //SELECT ? || d.name || '/' as ObjectName, MAX(o.created_at) as ModTime, SUM(o.size) as Size, MIN(o.health) as Health - //FROM objects o - //INNER JOIN buckets b ON o.db_bucket_id = b.id - //INNER JOIN directories d ON SUBSTR(o.object_id, 1, LENGTH(ObjectName)) = ObjectName - //WHERE b.name = ? AND d.parent_id = ? - //GROUP BY d.id - //ORDER BY ObjectName ASC - // `, path, bucket, dirID).Scan(&dirRows).Error - // if err != nil { - // return nil, false, err - // } - // //dirRow.Name = fmt.Sprintf("%s%s/", path, dir.Name) - // rows = append(rows, dirRows...) - // trim last element if we have more if len(rows) == limit { hasMore = true @@ -1804,7 +1724,7 @@ func (s *SQLStore) DeleteHostSector(ctx context.Context, hk types.PublicKey, roo return deletedSectors, err } -func (s *SQLStore) dirID(ctx context.Context, tx *gorm.DB, dirPath string) (uint, error) { +func (s *SQLStore) dirID(tx *gorm.DB, dirPath string) (uint, error) { if !strings.HasPrefix(dirPath, "/") { return 0, fmt.Errorf("path must start with /") } else if !strings.HasSuffix(dirPath, "/") { @@ -1827,7 +1747,7 @@ func (s *SQLStore) dirID(ctx context.Context, tx *gorm.DB, dirPath string) (uint return dirID, nil } -func (s *SQLStore) makeDirsForPath(ctx context.Context, tx *gorm.DB, path string) (uint, error) { +func (s *SQLStore) makeDirsForPath(tx *gorm.DB, path string) (uint, error) { if !strings.HasPrefix(path, "/") { return 0, fmt.Errorf("path must start with /") } @@ -1890,7 +1810,7 @@ func (s *SQLStore) UpdateObject(ctx context.Context, bucket, path, contractSet, } // create the dir - dirID, err := s.makeDirsForPath(ctx, tx, path) + dirID, err := s.makeDirsForPath(tx, path) if err != nil { return 
fmt.Errorf("failed to create directories: %w", err) } diff --git a/stores/metadata_test.go b/stores/metadata_test.go index d7a1a7c0f..389386e40 100644 --- a/stores/metadata_test.go +++ b/stores/metadata_test.go @@ -1443,12 +1443,6 @@ func TestObjectEntries(t *testing.T) { } } - var dirs []dbDirectory - ss.db.Find(&dirs) - for i, d := range dirs { - fmt.Println(i, d.ID, d.Name, d.ParentID) - } - // assertMetadata asserts both ModTime, MimeType and ETag and clears them so the // entries are ready for comparison assertMetadata := func(entries []api.ObjectMetadata) { @@ -1510,11 +1504,11 @@ func TestObjectEntries(t *testing.T) { {"/", "", "name", "ASC", []api.ObjectMetadata{{Name: "/FOO/", Size: 7, Health: 1}, {Name: "/fileś/", Size: 6, Health: 1}, {Name: "/foo/", Size: 10, Health: .5}, {Name: "/gab/", Size: 5, Health: 1}}}, {"/", "", "name", "DESC", []api.ObjectMetadata{{Name: "/gab/", Size: 5, Health: 1}, {Name: "/foo/", Size: 10, Health: .5}, {Name: "/fileś/", Size: 6, Health: 1}, {Name: "/FOO/", Size: 7, Health: 1}}}, - // {"/", "", "health", "ASC", []api.ObjectMetadata{{Name: "/foo/", Size: 10, Health: .5}, {Name: "/FOO/", Size: 7, Health: 1}, {Name: "/fileś/", Size: 6, Health: 1}, {Name: "/gab/", Size: 5, Health: 1}}}, - // {"/", "", "health", "DESC", []api.ObjectMetadata{{Name: "/FOO/", Size: 7, Health: 1}, {Name: "/fileś/", Size: 6, Health: 1}, {Name: "/gab/", Size: 5, Health: 1}, {Name: "/foo/", Size: 10, Health: .5}}}, + {"/", "", "health", "ASC", []api.ObjectMetadata{{Name: "/foo/", Size: 10, Health: .5}, {Name: "/FOO/", Size: 7, Health: 1}, {Name: "/fileś/", Size: 6, Health: 1}, {Name: "/gab/", Size: 5, Health: 1}}}, + {"/", "", "health", "DESC", []api.ObjectMetadata{{Name: "/FOO/", Size: 7, Health: 1}, {Name: "/fileś/", Size: 6, Health: 1}, {Name: "/gab/", Size: 5, Health: 1}, {Name: "/foo/", Size: 10, Health: .5}}}, - // {"/", "", "size", "DESC", []api.ObjectMetadata{{Name: "/foo/", Size: 10, Health: .5}, {Name: "/FOO/", Size: 7, Health: 1}, {Name: 
"/fileś/", Size: 6, Health: 1}, {Name: "/gab/", Size: 5, Health: 1}}}, - // {"/", "", "size", "ASC", []api.ObjectMetadata{{Name: "/gab/", Size: 5, Health: 1}, {Name: "/fileś/", Size: 6, Health: 1}, {Name: "/FOO/", Size: 7, Health: 1}, {Name: "/foo/", Size: 10, Health: .5}}}, + {"/", "", "size", "DESC", []api.ObjectMetadata{{Name: "/foo/", Size: 10, Health: .5}, {Name: "/FOO/", Size: 7, Health: 1}, {Name: "/fileś/", Size: 6, Health: 1}, {Name: "/gab/", Size: 5, Health: 1}}}, + {"/", "", "size", "ASC", []api.ObjectMetadata{{Name: "/gab/", Size: 5, Health: 1}, {Name: "/fileś/", Size: 6, Health: 1}, {Name: "/FOO/", Size: 7, Health: 1}, {Name: "/foo/", Size: 10, Health: .5}}}, } for _, test := range tests { got, _, err := ss.ObjectEntries(ctx, api.DefaultBucketName, test.path, test.prefix, test.sortBy, test.sortDir, "", 0, -1) From c7132689951c4f2b4f5c0fab9926009b3e578dcf Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 24 Apr 2024 17:00:42 +0200 Subject: [PATCH 05/52] store: fix TestObjectBasic --- stores/metadata.go | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/stores/metadata.go b/stores/metadata.go index 9906ab580..c1b943003 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -1742,16 +1742,11 @@ func (s *SQLStore) dirID(tx *gorm.DB, dirPath string) (uint, error) { Scan(&dirID).Error; err != nil { return 0, fmt.Errorf("failed to fetch root directory: %w", err) } - fmt.Println("dirID", dirID) } return dirID, nil } func (s *SQLStore) makeDirsForPath(tx *gorm.DB, path string) (uint, error) { - if !strings.HasPrefix(path, "/") { - return 0, fmt.Errorf("path must start with /") - } - // Create root dir. if err := tx.Exec("INSERT INTO directories (id) VALUES (1) ON CONFLICT DO NOTHING").Error; err != nil { return 0, fmt.Errorf("failed to create root directory: %w", err) @@ -1759,9 +1754,8 @@ func (s *SQLStore) makeDirsForPath(tx *gorm.DB, path string) (uint, error) { dirID := uint(1) // Create remaining directories. 
- splitPath := strings.Split(path[1:], "/") + splitPath := strings.Split(strings.TrimPrefix(path, "/"), "/") for _, dir := range splitPath[:len(splitPath)-1] { - fmt.Println("creating", dir) dbDir := dbDirectory{ Name: dir, ParentID: dirID, From 36e09c9274025043ac5bf292f77596cf0deaeb07 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 24 Apr 2024 17:05:50 +0200 Subject: [PATCH 06/52] stores: fix TestObjectsBySlabKey --- stores/metadata.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stores/metadata.go b/stores/metadata.go index c1b943003..6f5e74d65 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -2527,7 +2527,7 @@ func (s *SQLStore) ObjectsBySlabKey(ctx context.Context, bucket string, slabKey err = s.retryTransaction(ctx, func(tx *gorm.DB) error { return tx.Raw(` -SELECT DISTINCT obj.object_id as Name, obj.size as Size, obj.mime_type as MimeType, sla.health as Health +SELECT DISTINCT obj.object_id as ObjectName, obj.size as Size, obj.mime_type as MimeType, sla.health as Health FROM slabs sla INNER JOIN slices sli ON sli.db_slab_id = sla.id INNER JOIN objects obj ON sli.db_object_id = obj.id From eb9b44e8d141209f1349660978d8c2ea3028baa2 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 24 Apr 2024 17:15:09 +0200 Subject: [PATCH 07/52] stores: fix TestSQLMetadataStore and TestSlabCleanup --- stores/metadata_test.go | 39 +++++++++++++++---------- stores/migrations/mysql/main/schema.sql | 3 +- 2 files changed, 26 insertions(+), 16 deletions(-) diff --git a/stores/metadata_test.go b/stores/metadata_test.go index 389386e40..9adab6306 100644 --- a/stores/metadata_test.go +++ b/stores/metadata_test.go @@ -1047,11 +1047,12 @@ func TestSQLMetadataStore(t *testing.T) { one := uint(1) expectedObj := dbObject{ - DBBucketID: ss.DefaultBucketID(), - Health: 1, - ObjectID: objID, - Key: obj1Key, - Size: obj1.TotalSize(), + DBDirectoryID: 1, + DBBucketID: ss.DefaultBucketID(), + Health: 1, + ObjectID: objID, + Key: obj1Key, + Size: 
obj1.TotalSize(), Slabs: []dbSlice{ { DBObjectID: &one, @@ -4081,19 +4082,26 @@ func TestSlabCleanup(t *testing.T) { t.Fatal(err) } + dirID, err := ss.makeDirsForPath(ss.db, "1") + if err != nil { + t.Fatal(err) + } + // create objects obj1 := dbObject{ - ObjectID: "1", - DBBucketID: ss.DefaultBucketID(), - Health: 1, + DBDirectoryID: dirID, + ObjectID: "1", + DBBucketID: ss.DefaultBucketID(), + Health: 1, } if err := ss.db.Create(&obj1).Error; err != nil { t.Fatal(err) } obj2 := dbObject{ - ObjectID: "2", - DBBucketID: ss.DefaultBucketID(), - Health: 1, + DBDirectoryID: dirID, + ObjectID: "2", + DBBucketID: ss.DefaultBucketID(), + Health: 1, } if err := ss.db.Create(&obj2).Error; err != nil { t.Fatal(err) @@ -4128,7 +4136,7 @@ func TestSlabCleanup(t *testing.T) { } // delete the object - err := ss.RemoveObjectBlocking(context.Background(), api.DefaultBucketName, obj1.ObjectID) + err = ss.RemoveObjectBlocking(context.Background(), api.DefaultBucketName, obj1.ObjectID) if err != nil { t.Fatal(err) } @@ -4164,9 +4172,10 @@ func TestSlabCleanup(t *testing.T) { t.Fatal(err) } obj3 := dbObject{ - ObjectID: "3", - DBBucketID: ss.DefaultBucketID(), - Health: 1, + DBDirectoryID: dirID, + ObjectID: "3", + DBBucketID: ss.DefaultBucketID(), + Health: 1, } if err := ss.db.Create(&obj3).Error; err != nil { t.Fatal(err) diff --git a/stores/migrations/mysql/main/schema.sql b/stores/migrations/mysql/main/schema.sql index ae8c02657..123b4d005 100644 --- a/stores/migrations/mysql/main/schema.sql +++ b/stores/migrations/mysql/main/schema.sql @@ -318,6 +318,7 @@ CREATE TABLE `directories` ( `id` bigint unsigned NOT NULL AUTO_INCREMENT, `parent_id` bigint unsigned, `name` varchar(766) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL, + PRIMARY KEY (`id`), KEY `idx_directories_parent_id` (`parent_id`), UNIQUE KEY `idx_directories_name` (`name`), CONSTRAINT `fk_directories_db_directories` FOREIGN KEY (`parent_id`) REFERENCES `directories` (`id`) ON DELETE CASCADE @@ -343,7 +344,7 
@@ CREATE TABLE `objects` ( KEY `idx_objects_etag` (`etag`), KEY `idx_objects_size` (`size`), KEY `idx_objects_created_at` (`created_at`), - CONSTRAINT `fk_objects_db_bucket` FOREIGN KEY (`db_bucket_id`) REFERENCES `buckets` (`id`) + CONSTRAINT `fk_objects_db_bucket` FOREIGN KEY (`db_bucket_id`) REFERENCES `buckets` (`id`), CONSTRAINT `fk_objects_db_directories` FOREIGN KEY (`db_directory_id`) REFERENCES `directories` (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; From bd2d7e7cc6ffe09dc417181d98f1017508d385d1 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 25 Apr 2024 10:15:08 +0200 Subject: [PATCH 08/52] stores: fix makeDirsForPath for MySQL --- stores/metadata.go | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/stores/metadata.go b/stores/metadata.go index 6f5e74d65..b917398cb 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -1748,10 +1748,15 @@ func (s *SQLStore) dirID(tx *gorm.DB, dirPath string) (uint, error) { func (s *SQLStore) makeDirsForPath(tx *gorm.DB, path string) (uint, error) { // Create root dir. - if err := tx.Exec("INSERT INTO directories (id) VALUES (1) ON CONFLICT DO NOTHING").Error; err != nil { + dirID := uint(1) + if err := tx.Model(&dbDirectory{}). + Clauses(clause.OnConflict{ + DoNothing: true, + }).Create(map[string]any{ + "id": dirID, + }).Error; err != nil { return 0, fmt.Errorf("failed to create root directory: %w", err) } - dirID := uint(1) // Create remaining directories. 
splitPath := strings.Split(strings.TrimPrefix(path, "/"), "/") From 677e0d20069e8b41e8db517d51cde81fa43f7622 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 25 Apr 2024 11:11:13 +0200 Subject: [PATCH 09/52] stores: fix ObjectEntries query for MySQL --- stores/metadata.go | 6 +++++- stores/migrations/mysql/main/schema.sql | 1 + 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/stores/metadata.go b/stores/metadata.go index b917398cb..1ec6fe12e 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -1246,7 +1246,7 @@ UNION SELECT '' as ETag, MAX(o.created_at) as ModTime, ? || d.name || '/' as ObjectName, SUM(o.size) as Size, MIN(o.health) as Health, '' as MimeType FROM objects o INNER JOIN buckets b ON o.db_bucket_id = b.id -INNER JOIN directories d ON SUBSTR(o.object_id, 1, LENGTH(ObjectName)) = ObjectName AND %s +INNER JOIN directories d ON SUBSTR(o.object_id, 1, LENGTH(? || d.name || '/')) = ? || d.name || '/' AND %s WHERE b.name = ? AND d.parent_id = ? GROUP BY d.id `, prefixExpr, prefixExpr) @@ -1258,6 +1258,8 @@ GROUP BY d.id dirID, bucket, utf8.RuneCountInString(path + prefix), path + prefix, path, + path, + path, utf8.RuneCountInString(path + prefix), path + prefix, bucket, dirID, } @@ -1265,6 +1267,8 @@ GROUP BY d.id objectsQueryParams = []interface{}{ dirID, bucket, path, + path, + path, bucket, dirID, } } diff --git a/stores/migrations/mysql/main/schema.sql b/stores/migrations/mysql/main/schema.sql index 123b4d005..ac7041343 100644 --- a/stores/migrations/mysql/main/schema.sql +++ b/stores/migrations/mysql/main/schema.sql @@ -316,6 +316,7 @@ CREATE TABLE `multipart_parts` ( -- dbDirectory CREATE TABLE `directories` ( `id` bigint unsigned NOT NULL AUTO_INCREMENT, + `created_at` datetime(3) DEFAULT NULL, `parent_id` bigint unsigned, `name` varchar(766) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL, PRIMARY KEY (`id`), From 3cff23d8dbb2f9a144c9ac8c09c392011b1b1b8e Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 25 
Apr 2024 12:08:53 +0200 Subject: [PATCH 10/52] stores: db agnostic concat --- stores/metadata.go | 29 ++++++++++++++++++----------- 1 file changed, 18 insertions(+), 11 deletions(-) diff --git a/stores/metadata.go b/stores/metadata.go index 1ec6fe12e..0381975c2 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -1243,13 +1243,17 @@ FROM objects o INNER JOIN buckets b ON o.db_bucket_id = b.id WHERE o.db_directory_id = ? AND b.name = ? AND %s UNION -SELECT '' as ETag, MAX(o.created_at) as ModTime, ? || d.name || '/' as ObjectName, SUM(o.size) as Size, MIN(o.health) as Health, '' as MimeType +SELECT '' as ETag, MAX(o.created_at) as ModTime, %s as ObjectName, SUM(o.size) as Size, MIN(o.health) as Health, '' as MimeType FROM objects o INNER JOIN buckets b ON o.db_bucket_id = b.id -INNER JOIN directories d ON SUBSTR(o.object_id, 1, LENGTH(? || d.name || '/')) = ? || d.name || '/' AND %s +INNER JOIN directories d ON SUBSTR(o.object_id, 1, LENGTH(%s)) = %s AND %s WHERE b.name = ? AND d.parent_id = ? GROUP BY d.id - `, prefixExpr, prefixExpr) + `, prefixExpr, + sqlConcat(s.db, sqlConcat(s.db, "?", "d.name"), "'/'"), + sqlConcat(s.db, sqlConcat(s.db, "?", "d.name"), "'/'"), + sqlConcat(s.db, sqlConcat(s.db, "?", "d.name"), "'/'"), + prefixExpr) // build query params var objectsQueryParams []interface{} @@ -1765,18 +1769,21 @@ func (s *SQLStore) makeDirsForPath(tx *gorm.DB, path string) (uint, error) { // Create remaining directories. splitPath := strings.Split(strings.TrimPrefix(path, "/"), "/") for _, dir := range splitPath[:len(splitPath)-1] { - dbDir := dbDirectory{ - Name: dir, - ParentID: dirID, - } if err := tx.Clauses(clause.OnConflict{ - Columns: []clause.Column{{Name: "name"}, {Name: "parent_id"}}, - UpdateAll: true, + DoNothing: true, }). 
- Create(&dbDir).Error; err != nil { + Create(&dbDirectory{ + Name: dir, + ParentID: dirID, + }).Error; err != nil { return 0, fmt.Errorf("failed to create directory %v: %w", dir, err) } - dirID = dbDir.ID + var childID uint + if err := tx.Raw("SELECT id FROM directories WHERE name = ? AND parent_id = ?", dir, dirID). + Scan(&childID).Error; err != nil { + return 0, fmt.Errorf("failed to fetch directory id %v: %w", dir, err) + } + dirID = childID } return dirID, nil } From 548d5e8e5929d04ed1fdec1c25209d7e773215aa Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 25 Apr 2024 13:37:11 +0200 Subject: [PATCH 11/52] stores: handle char_length vs length --- stores/metadata.go | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/stores/metadata.go b/stores/metadata.go index 0381975c2..de7975908 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -1234,6 +1234,11 @@ func (s *SQLStore) ObjectEntries(ctx context.Context, bucket, path, prefix, sort prefixExpr = "SUBSTR(o.object_id, 1, ?) = ?" } + lengthFn := "CHAR_LENGTH" + if isSQLite(s.db) { + lengthFn = "LENGTH" + } + // objectsQuery consists of 2 parts // 1. fetch all objects in requested directory // 2. fetch all sub-directories @@ -1246,11 +1251,12 @@ UNION SELECT '' as ETag, MAX(o.created_at) as ModTime, %s as ObjectName, SUM(o.size) as Size, MIN(o.health) as Health, '' as MimeType FROM objects o INNER JOIN buckets b ON o.db_bucket_id = b.id -INNER JOIN directories d ON SUBSTR(o.object_id, 1, LENGTH(%s)) = %s AND %s +INNER JOIN directories d ON SUBSTR(o.object_id, 1, %s(%s)) = %s AND %s WHERE b.name = ? AND d.parent_id = ? GROUP BY d.id `, prefixExpr, sqlConcat(s.db, sqlConcat(s.db, "?", "d.name"), "'/'"), + lengthFn, sqlConcat(s.db, sqlConcat(s.db, "?", "d.name"), "'/'"), sqlConcat(s.db, sqlConcat(s.db, "?", "d.name"), "'/'"), prefixExpr) @@ -1286,7 +1292,7 @@ GROUP BY d.id var markerHealth float64 if err = s.db. WithContext(ctx). 
- Raw(fmt.Sprintf(`SELECT Health FROM (SELECT * FROM (%s) WHERE ObjectName >= ? ORDER BY ObjectName LIMIT 1) as n`, objectsQuery), append(objectsQueryParams, marker)...). + Raw(fmt.Sprintf(`SELECT Health FROM (SELECT * FROM (%s) h WHERE ObjectName >= ? ORDER BY ObjectName LIMIT 1) as n`, objectsQuery), append(objectsQueryParams, marker)...). Scan(&markerHealth). Error; err != nil { return @@ -1303,7 +1309,7 @@ GROUP BY d.id var markerSize float64 if err = s.db. WithContext(ctx). - Raw(fmt.Sprintf(`SELECT Size FROM (SELECT * FROM (%s) WHERE ObjectName >= ? ORDER BY ObjectName LIMIT 1) as n`, objectsQuery), append(objectsQueryParams, marker)...). + Raw(fmt.Sprintf(`SELECT Size FROM (SELECT * FROM (%s) s WHERE ObjectName >= ? ORDER BY ObjectName LIMIT 1) as n`, objectsQuery), append(objectsQueryParams, marker)...). Scan(&markerSize). Error; err != nil { return From 7b58af9134926bca67f05c9534bb5fc881787af1 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 25 Apr 2024 13:39:05 +0200 Subject: [PATCH 12/52] stores: add LIKE for index usage --- stores/metadata.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/stores/metadata.go b/stores/metadata.go index de7975908..8981cf310 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -1251,11 +1251,12 @@ UNION SELECT '' as ETag, MAX(o.created_at) as ModTime, %s as ObjectName, SUM(o.size) as Size, MIN(o.health) as Health, '' as MimeType FROM objects o INNER JOIN buckets b ON o.db_bucket_id = b.id -INNER JOIN directories d ON SUBSTR(o.object_id, 1, %s(%s)) = %s AND %s +INNER JOIN directories d ON o.object_id LIKE %s AND SUBSTR(o.object_id, 1, %s(%s)) = %s AND %s WHERE b.name = ? AND d.parent_id = ? 
GROUP BY d.id `, prefixExpr, sqlConcat(s.db, sqlConcat(s.db, "?", "d.name"), "'/'"), + sqlConcat(s.db, sqlConcat(s.db, "?", "d.name"), "'/%'"), lengthFn, sqlConcat(s.db, sqlConcat(s.db, "?", "d.name"), "'/'"), sqlConcat(s.db, sqlConcat(s.db, "?", "d.name"), "'/'"), @@ -1270,6 +1271,7 @@ GROUP BY d.id path, path, path, + path, utf8.RuneCountInString(path + prefix), path + prefix, bucket, dirID, } @@ -1279,6 +1281,7 @@ GROUP BY d.id path, path, path, + path, bucket, dirID, } } From 7d49911af94e76a55bea81b3285d441bd4b404ff Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 25 Apr 2024 13:59:10 +0200 Subject: [PATCH 13/52] stores: make makeDirsForPath a function --- stores/metadata.go | 4 ++-- stores/metadata_test.go | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/stores/metadata.go b/stores/metadata.go index 8981cf310..6ca9362a7 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -1763,7 +1763,7 @@ func (s *SQLStore) dirID(tx *gorm.DB, dirPath string) (uint, error) { return dirID, nil } -func (s *SQLStore) makeDirsForPath(tx *gorm.DB, path string) (uint, error) { +func makeDirsForPath(tx *gorm.DB, path string) (uint, error) { // Create root dir. dirID := uint(1) if err := tx.Model(&dbDirectory{}). 
@@ -1829,7 +1829,7 @@ func (s *SQLStore) UpdateObject(ctx context.Context, bucket, path, contractSet, } // create the dir - dirID, err := s.makeDirsForPath(tx, path) + dirID, err := makeDirsForPath(tx, path) if err != nil { return fmt.Errorf("failed to create directories: %w", err) } diff --git a/stores/metadata_test.go b/stores/metadata_test.go index 9adab6306..e46d15410 100644 --- a/stores/metadata_test.go +++ b/stores/metadata_test.go @@ -4082,7 +4082,7 @@ func TestSlabCleanup(t *testing.T) { t.Fatal(err) } - dirID, err := ss.makeDirsForPath(ss.db, "1") + dirID, err := makeDirsForPath(ss.db, "1") if err != nil { t.Fatal(err) } From 5f36dd250c83db638997daedab1bd732ae1faec7 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 25 Apr 2024 14:09:25 +0200 Subject: [PATCH 14/52] e2e: fix TestObjectEntries --- internal/test/e2e/cluster_test.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/internal/test/e2e/cluster_test.go b/internal/test/e2e/cluster_test.go index 72ac4b95b..2ad1b1727 100644 --- a/internal/test/e2e/cluster_test.go +++ b/internal/test/e2e/cluster_test.go @@ -251,14 +251,15 @@ func TestObjectEntries(t *testing.T) { entries[i].ModTime = api.TimeRFC3339{} // assert mime type - if entries[i].MimeType == "" { - t.Fatal("mime type should be set", entries[i].MimeType, entries[i].Name) + isDir := strings.HasSuffix(entries[i].Name, "/") + if (isDir && entries[i].MimeType != "") || (!isDir && entries[i].MimeType == "") { + t.Fatal("unexpected mime type", entries[i].MimeType) } entries[i].MimeType = "" // assert etag - if entries[i].ETag == "" { - t.Fatal("ETag should be set") + if isDir != (entries[i].ETag == "") { + t.Fatal("etag should be set for files and empty for dirs") } entries[i].ETag = "" } From e484a3f9f2ff9d3cea436e221aa6ded3634d5cab Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 25 Apr 2024 14:14:23 +0200 Subject: [PATCH 15/52] stores: mysql migration --- stores/migrations.go | 25 +++++++++++++++++++ 
.../main/migration_00008_directories.sql | 14 +++++++++++ stores/migrations/mysql/main/schema.sql | 1 + 3 files changed, 40 insertions(+) create mode 100644 stores/migrations/mysql/main/migration_00008_directories.sql diff --git a/stores/migrations.go b/stores/migrations.go index 4ac6b755e..999f1c240 100644 --- a/stores/migrations.go +++ b/stores/migrations.go @@ -68,6 +68,31 @@ func performMigrations(db *gorm.DB, logger *zap.SugaredLogger) error { return performMigration(tx, dbIdentifier, "00007_host_checks", logger) }, }, + { + ID: "00008_directories", + Migrate: func(tx *gorm.DB) error { + if err := performMigration(tx, dbIdentifier, "00008_directories", logger); err != nil { + return fmt.Errorf("failed to migrate: %v", err) + } + // loop over all objects one-by-one and create the corresponding directory + for offset := 0; ; offset++ { + var obj dbObject + if err := tx.Offset(offset).Limit(1).Take(&obj).Error; errors.Is(err, gorm.ErrRecordNotFound) { + break // done + } else if err != nil { + return fmt.Errorf("failed to fetch object: %v", err) + } + dirID, err := makeDirsForPath(tx, obj.ObjectID) + if err != nil { + return fmt.Errorf("failed to create directory %s: %w", obj.ObjectID, err) + } + if err := tx.Where(obj).Update("db_directory_id", dirID).Error; err != nil { + return fmt.Errorf("failed to update object %s: %w", obj.ObjectID, err) + } + } + return nil + }, + }, } // Create migrator. 
diff --git a/stores/migrations/mysql/main/migration_00008_directories.sql b/stores/migrations/mysql/main/migration_00008_directories.sql new file mode 100644 index 000000000..ee288f418 --- /dev/null +++ b/stores/migrations/mysql/main/migration_00008_directories.sql @@ -0,0 +1,14 @@ +-- dbDirectory +CREATE TABLE `directories` ( + `id` bigint unsigned NOT NULL AUTO_INCREMENT, + `created_at` datetime(3) DEFAULT NULL, + `parent_id` bigint unsigned, + `name` varchar(766) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL, + PRIMARY KEY (`id`), + KEY `idx_directories_parent_id` (`parent_id`), + UNIQUE KEY `idx_directories_name` (`name`), + CONSTRAINT `fk_directories_db_directories` FOREIGN KEY (`parent_id`) REFERENCES `directories` (`id`) ON DELETE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; + +ALTER TABLE `objects` ADD COLUMN `db_directory_id` bigint unsigned NOT NULL; +CREATE INDEX `idx_objects_db_directory_id` ON `objects` (`db_directory_id`); \ No newline at end of file diff --git a/stores/migrations/mysql/main/schema.sql b/stores/migrations/mysql/main/schema.sql index ac7041343..7540546bb 100644 --- a/stores/migrations/mysql/main/schema.sql +++ b/stores/migrations/mysql/main/schema.sql @@ -345,6 +345,7 @@ CREATE TABLE `objects` ( KEY `idx_objects_etag` (`etag`), KEY `idx_objects_size` (`size`), KEY `idx_objects_created_at` (`created_at`), + KEY `idx_objects_db_directory_id` (`db_directory_id`), CONSTRAINT `fk_objects_db_bucket` FOREIGN KEY (`db_bucket_id`) REFERENCES `buckets` (`id`), CONSTRAINT `fk_objects_db_directories` FOREIGN KEY (`db_directory_id`) REFERENCES `directories` (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; From a8f19e486819c09a90ba8678df17c030cc313f1d Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 25 Apr 2024 14:26:04 +0200 Subject: [PATCH 16/52] e2e: fix TestUploadDownloadExtended --- internal/test/e2e/cluster_test.go | 12 +++++++----- 1 file changed, 7 insertions(+), 5 
deletions(-) diff --git a/internal/test/e2e/cluster_test.go b/internal/test/e2e/cluster_test.go index 2ad1b1727..16a335265 100644 --- a/internal/test/e2e/cluster_test.go +++ b/internal/test/e2e/cluster_test.go @@ -671,14 +671,16 @@ func TestUploadDownloadExtended(t *testing.T) { tt.OKAll(w.UploadObject(context.Background(), bytes.NewReader(file2), api.DefaultBucketName, "fileś/file2", api.UploadObjectOptions{})) // fetch all entries from the worker - entries, err := cluster.Worker.ObjectEntries(context.Background(), api.DefaultBucketName, "", api.GetObjectOptions{}) + entries, err := cluster.Worker.ObjectEntries(context.Background(), api.DefaultBucketName, "fileś/", api.GetObjectOptions{}) tt.OK(err) - if len(entries) != 1 { - t.Fatal("expected one entry to be returned", len(entries)) + if len(entries) != 2 { + t.Fatal("expected two entries to be returned", len(entries)) } - if entries[0].MimeType != "application/octet-stream" { - t.Fatal("wrong mime type", entries[0].MimeType) + for _, entry := range entries { + if entry.MimeType != "application/octet-stream" { + t.Fatal("wrong mime type", entry.MimeType) + } } // fetch entries with "file" prefix From b5b2640c7ef16661495ae8653522532167689f02 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 25 Apr 2024 14:41:04 +0200 Subject: [PATCH 17/52] e2e: fix TestMultipartUploads --- stores/metadata.go | 3 ++- stores/migrations.go | 5 +++++ .../main/migration_00008_directories.sql | 4 +++- stores/migrations/mysql/main/schema.sql | 2 +- stores/multipart.go | 19 +++++++++++++------ 5 files changed, 24 insertions(+), 9 deletions(-) diff --git a/stores/metadata.go b/stores/metadata.go index 6ca9362a7..7b5fec6f9 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -1776,7 +1776,8 @@ func makeDirsForPath(tx *gorm.DB, path string) (uint, error) { } // Create remaining directories. 
- splitPath := strings.Split(strings.TrimPrefix(path, "/"), "/") + path = strings.TrimPrefix(path, "/") + splitPath := strings.Split(path, "/") for _, dir := range splitPath[:len(splitPath)-1] { if err := tx.Clauses(clause.OnConflict{ DoNothing: true, diff --git a/stores/migrations.go b/stores/migrations.go index 999f1c240..3e5a43caa 100644 --- a/stores/migrations.go +++ b/stores/migrations.go @@ -75,7 +75,11 @@ func performMigrations(db *gorm.DB, logger *zap.SugaredLogger) error { return fmt.Errorf("failed to migrate: %v", err) } // loop over all objects one-by-one and create the corresponding directory + logger.Info("beginning post-migration directory creation, this might take a while") for offset := 0; ; offset++ { + if offset > 0 && offset%1000 == 0 { + logger.Infof("processed %v objects", offset) + } var obj dbObject if err := tx.Offset(offset).Limit(1).Take(&obj).Error; errors.Is(err, gorm.ErrRecordNotFound) { break // done @@ -90,6 +94,7 @@ func performMigrations(db *gorm.DB, logger *zap.SugaredLogger) error { return fmt.Errorf("failed to update object %s: %w", obj.ObjectID, err) } } + logger.Info("post-migration directory creation complete") return nil }, }, diff --git a/stores/migrations/mysql/main/migration_00008_directories.sql b/stores/migrations/mysql/main/migration_00008_directories.sql index ee288f418..0f91e6360 100644 --- a/stores/migrations/mysql/main/migration_00008_directories.sql +++ b/stores/migrations/mysql/main/migration_00008_directories.sql @@ -10,5 +10,7 @@ CREATE TABLE `directories` ( CONSTRAINT `fk_directories_db_directories` FOREIGN KEY (`parent_id`) REFERENCES `directories` (`id`) ON DELETE CASCADE ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; -ALTER TABLE `objects` ADD COLUMN `db_directory_id` bigint unsigned NOT NULL; +ALTER TABLE `objects` +ADD COLUMN `db_directory_id` bigint unsigned NOT NULL, +ADD CONSTRAINT `fk_objects_db_directory_id` FOREIGN KEY (`db_directory_id`) REFERENCES `directories` (`id`); CREATE
INDEX `idx_objects_db_directory_id` ON `objects` (`db_directory_id`); \ No newline at end of file diff --git a/stores/migrations/mysql/main/schema.sql b/stores/migrations/mysql/main/schema.sql index 7540546bb..be8d72a9c 100644 --- a/stores/migrations/mysql/main/schema.sql +++ b/stores/migrations/mysql/main/schema.sql @@ -347,7 +347,7 @@ CREATE TABLE `objects` ( KEY `idx_objects_created_at` (`created_at`), KEY `idx_objects_db_directory_id` (`db_directory_id`), CONSTRAINT `fk_objects_db_bucket` FOREIGN KEY (`db_bucket_id`) REFERENCES `buckets` (`id`), - CONSTRAINT `fk_objects_db_directories` FOREIGN KEY (`db_directory_id`) REFERENCES `directories` (`id`) + CONSTRAINT `fk_objects_db_directory_id` FOREIGN KEY (`db_directory_id`) REFERENCES `directories` (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; -- dbSetting diff --git a/stores/multipart.go b/stores/multipart.go index 5fde55d7b..67a042330 100644 --- a/stores/multipart.go +++ b/stores/multipart.go @@ -403,14 +403,21 @@ func (s *SQLStore) CompleteMultipartUpload(ctx context.Context, bucket, path str sum := h.Sum() eTag = hex.EncodeToString(sum[:]) + // Create the directory. + dirID, err := makeDirsForPath(tx, path) + if err != nil { + return fmt.Errorf("failed to create directory for path %s: %w", path, err) + } + // Create the object. 
obj := dbObject{ - DBBucketID: mu.DBBucketID, - ObjectID: path, - Key: mu.Key, - Size: int64(size), - MimeType: mu.MimeType, - Etag: eTag, + DBDirectoryID: dirID, + DBBucketID: mu.DBBucketID, + ObjectID: path, + Key: mu.Key, + Size: int64(size), + MimeType: mu.MimeType, + Etag: eTag, } if err := tx.Create(&obj).Error; err != nil { return fmt.Errorf("failed to create object: %w", err) From c8edef320317c22c1f30406db927d9f122d0e7e7 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 25 Apr 2024 15:26:56 +0200 Subject: [PATCH 18/52] e2e: TestS3List --- stores/metadata.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/stores/metadata.go b/stores/metadata.go index 7b5fec6f9..6a0c3699d 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -1246,7 +1246,7 @@ func (s *SQLStore) ObjectEntries(ctx context.Context, bucket, path, prefix, sort SELECT o.etag as ETag, o.created_at as ModTime, o.object_id as ObjectName, o.size as Size, o.health as Health, o.mime_type as MimeType FROM objects o INNER JOIN buckets b ON o.db_bucket_id = b.id -WHERE o.db_directory_id = ? AND b.name = ? AND %s +WHERE o.object_id != ? AND o.db_directory_id = ? AND b.name = ? 
AND %s UNION SELECT '' as ETag, MAX(o.created_at) as ModTime, %s as ObjectName, SUM(o.size) as Size, MIN(o.health) as Health, '' as MimeType FROM objects o @@ -1266,6 +1266,7 @@ GROUP BY d.id var objectsQueryParams []interface{} if prefix != "" { objectsQueryParams = []interface{}{ + path, dirID, bucket, utf8.RuneCountInString(path + prefix), path + prefix, path, @@ -1277,6 +1278,7 @@ GROUP BY d.id } } else { objectsQueryParams = []interface{}{ + path, dirID, bucket, path, path, From fdc70b622d0190ebdfa629d9497d3c59a44942c6 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 25 Apr 2024 16:03:08 +0200 Subject: [PATCH 19/52] e2e: fix TestObjectEntries on MySQL --- stores/metadata.go | 2 ++ stores/migrations/mysql/main/schema.sql | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/stores/metadata.go b/stores/metadata.go index 6a0c3699d..bbb21697c 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -1794,6 +1794,8 @@ func makeDirsForPath(tx *gorm.DB, path string) (uint, error) { if err := tx.Raw("SELECT id FROM directories WHERE name = ? AND parent_id = ?", dir, dirID).
Scan(&childID).Error; err != nil { return 0, fmt.Errorf("failed to fetch directory id %v: %w", dir, err) + } else if childID == 0 { + return 0, fmt.Errorf("dir we just created doesn't exist - shouldn't happen") } dirID = childID } diff --git a/stores/migrations/mysql/main/schema.sql b/stores/migrations/mysql/main/schema.sql index be8d72a9c..69f2542ba 100644 --- a/stores/migrations/mysql/main/schema.sql +++ b/stores/migrations/mysql/main/schema.sql @@ -321,7 +321,7 @@ CREATE TABLE `directories` ( `name` varchar(766) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL, PRIMARY KEY (`id`), KEY `idx_directories_parent_id` (`parent_id`), - UNIQUE KEY `idx_directories_name` (`name`), + UNIQUE KEY `idx_directories_name_parent_id` (`name`, `parent_id`), CONSTRAINT `fk_directories_db_directories` FOREIGN KEY (`parent_id`) REFERENCES `directories` (`id`) ON DELETE CASCADE ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; From 065edd984e181deca2cd6755eb2fde2b17352330 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 25 Apr 2024 16:03:08 +0200 Subject: [PATCH 20/52] e2e: fix TestObjectEntries on MySQL --- stores/metadata.go | 2 ++ stores/migrations/mysql/main/migration_00008_directories.sql | 2 +- stores/migrations/mysql/main/schema.sql | 2 +- 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/stores/metadata.go b/stores/metadata.go index 6a0c3699d..bbb21697c 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -1794,6 +1794,8 @@ func makeDirsForPath(tx *gorm.DB, path string) (uint, error) { if err := tx.Raw("SELECT id FROM directories WHERE name = ? AND parent_id = ?", dir, dirID).
Scan(&childID).Error; err != nil { return 0, fmt.Errorf("failed to fetch directory id %v: %w", dir, err) + } else if childID == 0 { + return 0, fmt.Errorf("dir we just created doesn't exist - shouldn't happen") } dirID = childID } diff --git a/stores/migrations/mysql/main/migration_00008_directories.sql b/stores/migrations/mysql/main/migration_00008_directories.sql index 0f91e6360..5e7114976 100644 --- a/stores/migrations/mysql/main/migration_00008_directories.sql +++ b/stores/migrations/mysql/main/migration_00008_directories.sql @@ -6,7 +6,7 @@ CREATE TABLE `directories` ( `name` varchar(766) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL, PRIMARY KEY (`id`), KEY `idx_directories_parent_id` (`parent_id`), - UNIQUE KEY `idx_directories_name` (`name`), + UNIQUE KEY `idx_directories_name_parent_id` (`name`, `parent_id`), CONSTRAINT `fk_directories_db_directories` FOREIGN KEY (`parent_id`) REFERENCES `directories` (`id`) ON DELETE CASCADE ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; diff --git a/stores/migrations/mysql/main/schema.sql b/stores/migrations/mysql/main/schema.sql index be8d72a9c..69f2542ba 100644 --- a/stores/migrations/mysql/main/schema.sql +++ b/stores/migrations/mysql/main/schema.sql @@ -321,7 +321,7 @@ CREATE TABLE `directories` ( `name` varchar(766) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL, PRIMARY KEY (`id`), KEY `idx_directories_parent_id` (`parent_id`), - UNIQUE KEY `idx_directories_name` (`name`), + UNIQUE KEY `idx_directories_name_parent_id` (`name`, `parent_id`), CONSTRAINT `fk_directories_db_directories` FOREIGN KEY (`parent_id`) REFERENCES `directories` (`id`) ON DELETE CASCADE ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; From beecd2b889c70955e90aca8d6f6fbe561f2f6f3a Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 25 Apr 2024 16:55:13 +0200 Subject: [PATCH 21/52] stores: add TestDirectories --- stores/metadata.go | 29 ++++++++++++-- stores/metadata_test.go | 88 
+++++++++++++++++++++++++++++++++++++++++ 2 files changed, 114 insertions(+), 3 deletions(-) diff --git a/stores/metadata.go b/stores/metadata.go index bbb21697c..d51c33e6a 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -2814,13 +2814,20 @@ func (s *SQLStore) pruneSlabsLoop() { } ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second+sumDurations(s.retryTransactionIntervals)) - err := s.retryTransaction(ctx, pruneSlabs) + err := s.retryTransaction(ctx, func(tx *gorm.DB) error { + if err := pruneSlabs(tx); err != nil { + return fmt.Errorf("failed to prune slabs: %w", err) + } else if err := pruneDirs(tx); err != nil { + return fmt.Errorf("failed to prune directories: %w", err) + } + return nil + }) if err != nil { - s.logger.Errorw("failed to prune slabs", zap.Error(err)) + s.logger.Errorw("pruning failed", zap.Error(err)) s.alerts.RegisterAlert(s.shutdownCtx, alerts.Alert{ ID: pruneSlabsAlertID, Severity: alerts.SeverityWarning, - Message: "Failed to prune slabs from database", + Message: "Failed to prune database", Timestamp: time.Now(), Data: map[string]interface{}{ "error": err.Error(), @@ -2847,6 +2854,22 @@ AND slabs.db_buffered_slab_id IS NULL `).Error } +func pruneDirs(tx *gorm.DB) error { + for { + res := tx.Exec(` +DELETE +FROM directories +WHERE NOT EXISTS (SELECT 1 FROM objects WHERE objects.db_directory_id = directories.id) +AND NOT EXISTS (SELECT 1 FROM (SELECT 1 FROM directories AS d WHERE d.parent_id = directories.id) i) +`) + if res.Error != nil { + return res.Error + } else if res.RowsAffected == 0 { + return nil + } + } +} + func (s *SQLStore) triggerSlabPruning() { select { case s.slabPruneSigChan <- struct{}{}: diff --git a/stores/metadata_test.go b/stores/metadata_test.go index e46d15410..0936172a9 100644 --- a/stores/metadata_test.go +++ b/stores/metadata_test.go @@ -4773,3 +4773,91 @@ func TestFetchUsedContracts(t *testing.T) { t.Fatal("contracts should point to the renewed contract") } } + +func 
TestDirectories(t *testing.T) { + ss := newTestSQLStore(t, defaultTestSQLStoreConfig) + defer ss.Close() + + objects := []string{ + "/foo", + "/bar/baz", + "///somefile", + "/dir/fakedir/", + "/", + "/bar/fileinsamedirasbefore", + } + + for _, o := range objects { + dirID, err := makeDirsForPath(ss.db, o) + if err != nil { + t.Fatal(err) + } else if dirID == 0 { + t.Fatalf("unexpected dir id %v", dirID) + } + } + + expectedDirs := []struct { + name string + id uint + parentID uint + }{ + { + name: "", + id: 1, + parentID: 0, + }, + { + name: "bar", + id: 2, + parentID: 1, + }, + { + name: "", + id: 3, + parentID: 1, + }, + { + name: "", + id: 4, + parentID: 3, + }, + { + name: "dir", + id: 2, + parentID: 1, + }, + { + name: "fakedir", + id: 4, + parentID: 3, + }, + } + + var dbDirs []dbDirectory + if err := ss.db.Find(&dbDirs).Error; err != nil { + t.Fatal(err) + } else if len(dbDirs) != len(expectedDirs) { + t.Fatalf("expected %v dirs, got %v", len(expectedDirs), len(dbDirs)) + } + + for i, dbDir := range dbDirs { + if dbDir.ID != uint(i+1) { + t.Fatalf("unexpected id %v", dbDir.ID) + } else if dbDir.Name != expectedDirs[i].name { + t.Fatalf("unexpected name '%v' != '%v'", dbDir.Name, expectedDirs[i].name) + } + } + + now := time.Now() + ss.triggerSlabPruning() + if err := ss.waitForPruneLoop(now); err != nil { + t.Fatal(err) + } + + var n int64 + if err := ss.db.Model(&dbDirectory{}).Count(&n).Error; err != nil { + t.Fatal(err) + } else if n != 0 { + t.Fatal("expected 0 dirs, got", n) + } +} From fa1dcabd4d49e284e149cb363eb73d371092a8b2 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 25 Apr 2024 17:03:13 +0200 Subject: [PATCH 22/52] stores: fix TestSlabCleanup --- stores/metadata.go | 3 ++- stores/metadata_test.go | 4 ++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/stores/metadata.go b/stores/metadata.go index d51c33e6a..0157f51df 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -2859,7 +2859,8 @@ func pruneDirs(tx 
*gorm.DB) error { res := tx.Exec(` DELETE FROM directories -WHERE NOT EXISTS (SELECT 1 FROM objects WHERE objects.db_directory_id = directories.id) +WHERE directories.id != 1 +AND NOT EXISTS (SELECT 1 FROM objects WHERE objects.db_directory_id = directories.id) AND NOT EXISTS (SELECT 1 FROM (SELECT 1 FROM directories AS d WHERE d.parent_id = directories.id) i) `) if res.Error != nil { diff --git a/stores/metadata_test.go b/stores/metadata_test.go index 0936172a9..461aa60b3 100644 --- a/stores/metadata_test.go +++ b/stores/metadata_test.go @@ -4857,7 +4857,7 @@ func TestDirectories(t *testing.T) { var n int64 if err := ss.db.Model(&dbDirectory{}).Count(&n).Error; err != nil { t.Fatal(err) - } else if n != 0 { - t.Fatal("expected 0 dirs, got", n) + } else if n != 1 { + t.Fatal("expected 1 dir, got", n) } } From ea62d62d653155cc4f567d4a1e015e7184d0843a Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Fri, 26 Apr 2024 14:12:50 +0200 Subject: [PATCH 23/52] stores: deduplicate directories when migrating --- stores/migrations.go | 61 +++++++++++++++++++++++++++++++++----------- 1 file changed, 46 insertions(+), 15 deletions(-) diff --git a/stores/migrations.go b/stores/migrations.go index 3e5a43caa..b6e91cf97 100644 --- a/stores/migrations.go +++ b/stores/migrations.go @@ -3,6 +3,8 @@ package stores import ( "errors" "fmt" + "strings" + "unicode/utf8" "github.com/go-gormigrate/gormigrate/v2" "go.sia.tech/renterd/internal/utils" @@ -71,37 +73,66 @@ func performMigrations(db *gorm.DB, logger *zap.SugaredLogger) error { { ID: "00008_directories", Migrate: func(tx *gorm.DB) error { - if err := performMigration(tx, dbIdentifier, "00008_directories", logger); err != nil { + if err := performMigration(tx, dbIdentifier, "00008_directories_1", logger); err != nil { return fmt.Errorf("failed to migrate: %v", err) } - // loop over all objects one-by-one and create the corresponding directory + // loop over all objects and deduplicate dirs to create logger.Info("beginning 
post-migration directory creation, this might take a while") - for offset := 0; ; offset++ { - if offset > 0 && offset%1000 == 0 { + batchSize := 10000 + processedDirs := make(map[string]struct{}) + for offset := 0; ; offset += batchSize { + if offset > 0 && offset%batchSize == 0 { logger.Infof("processed %v objects", offset) } - var obj dbObject - if err := tx.Offset(offset).Limit(1).Take(&obj).Error; errors.Is(err, gorm.ErrRecordNotFound) { + var objBatch []dbObject + if err := tx. + Offset(offset). + Limit(batchSize). + Select("id", "object_id"). + Find(&objBatch).Error; err != nil { + return fmt.Errorf("failed to fetch objects: %v", err) + } else if len(objBatch) == 0 { break // done - } else if err != nil { - return fmt.Errorf("failed to fetch object: %v", err) } - dirID, err := makeDirsForPath(tx, obj.ObjectID) - if err != nil { - return fmt.Errorf("failed to create directory %s: %w", obj.ObjectID, err) - } - if err := tx.Where(obj).Update("db_directory_id", dirID).Error; err != nil { - return fmt.Errorf("failed to update object %s: %w", obj.ObjectID, err) + for _, obj := range objBatch { + // check if dir was processed + dir := "" // root + if i := strings.LastIndex(obj.ObjectID, "/"); i > -1 { + dir = obj.ObjectID[:i+1] + } + _, exists := processedDirs[dir] + if exists { + continue // already processed + } + processedDirs[dir] = struct{}{} + + // process + dirID, err := makeDirsForPath(tx, obj.ObjectID) + if err != nil { + return fmt.Errorf("failed to create directory %s: %w", obj.ObjectID, err) + } + if err := tx.Model(&dbObject{}). + Where("object_id LIKE ?", dir+"%"). // uses index but case sensitive + Where("SUBSTR(object_id, 1, ?) = ?", utf8.RuneCountInString(dir), dir). // exact comparison + Where("INSTR(SUBSTR(object_id, ?), '/') = 0", utf8.RuneCountInString(dir)+1). 
+ Update("db_directory_id", dirID).Error; err != nil { + return fmt.Errorf("failed to update object %s: %w", obj.ObjectID, err) + } } } logger.Info("post-migration directory creation complete") + if err := performMigration(tx, dbIdentifier, "00008_directories_2", logger); err != nil { + return fmt.Errorf("failed to migrate: %v", err) + } return nil }, }, } // Create migrator. - m := gormigrate.New(db, gormigrate.DefaultOptions, migrations) + opts := gormigrate.DefaultOptions + opts.UseTransaction = true + m := gormigrate.New(db, opts, migrations) // Set init function. m.InitSchema(initSchema(dbIdentifier, logger)) From 639a7154ef2f606250265d5f83d555623ecd827f Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Fri, 26 Apr 2024 15:27:04 +0200 Subject: [PATCH 24/52] stores: implement renaming --- stores/metadata.go | 21 ++++++++++++++++++--- stores/metadata_test.go | 37 +++++++++++++++++++++++++++++++++++++ 2 files changed, 55 insertions(+), 3 deletions(-) diff --git a/stores/metadata.go b/stores/metadata.go index 0157f51df..3cfee1ada 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -1525,7 +1525,13 @@ func (s *SQLStore) RenameObject(ctx context.Context, bucket, keyOld, keyNew stri return fmt.Errorf("RenameObject: failed to delete object: %w", err) } } - tx = tx.Exec(`UPDATE objects SET object_id = ? WHERE object_id = ? AND ?`, keyNew, keyOld, sqlWhereBucket("objects", bucket)) + // create new dir + dirID, err := makeDirsForPath(tx, keyNew) + if err != nil { + return err + } + // update object + tx = tx.Exec(`UPDATE objects SET object_id = ?, db_directory_id = ? WHERE object_id = ? 
AND ?`, keyNew, dirID, keyOld, sqlWhereBucket("objects", bucket)) if tx.Error != nil && (strings.Contains(tx.Error.Error(), "UNIQUE constraint failed") || strings.Contains(tx.Error.Error(), "Duplicate entry")) { return api.ErrObjectExists @@ -1535,6 +1541,8 @@ func (s *SQLStore) RenameObject(ctx context.Context, bucket, keyOld, keyNew stri if tx.RowsAffected == 0 { return fmt.Errorf("%w: key %v", api.ErrObjectNotFound, keyOld) } + // delete old dir if empty + s.triggerSlabPruning() return nil }) } @@ -1558,8 +1566,13 @@ func (s *SQLStore) RenameObjects(ctx context.Context, bucket, prefixOld, prefixN return err } } - tx = tx.Exec("UPDATE objects SET object_id = "+sqlConcat(tx, "?", "SUBSTR(object_id, ?)")+" WHERE object_id LIKE ? AND SUBSTR(object_id, 1, ?) = ? AND ?", - prefixNew, utf8.RuneCountInString(prefixOld)+1, prefixOld+"%", utf8.RuneCountInString(prefixOld), prefixOld, sqlWhereBucket("objects", bucket)) + // create new dir + dirID, err := makeDirsForPath(tx, prefixNew) + if err != nil { + return err + } + tx = tx.Exec("UPDATE objects SET object_id = "+sqlConcat(tx, "?", "SUBSTR(object_id, ?)")+", db_directory_id = ? WHERE object_id LIKE ? AND SUBSTR(object_id, 1, ?) = ? 
AND ?", + prefixNew, utf8.RuneCountInString(prefixOld)+1, dirID, prefixOld+"%", utf8.RuneCountInString(prefixOld), prefixOld, sqlWhereBucket("objects", bucket)) if tx.Error != nil && (strings.Contains(tx.Error.Error(), "UNIQUE constraint failed") || strings.Contains(tx.Error.Error(), "Duplicate entry")) { return api.ErrObjectExists @@ -1569,6 +1582,8 @@ func (s *SQLStore) RenameObjects(ctx context.Context, bucket, prefixOld, prefixN if tx.RowsAffected == 0 { return fmt.Errorf("%w: prefix %v", api.ErrObjectNotFound, prefixOld) } + // delete old dir if empty + s.triggerSlabPruning() return nil }) } diff --git a/stores/metadata_test.go b/stores/metadata_test.go index 461aa60b3..185a40897 100644 --- a/stores/metadata_test.go +++ b/stores/metadata_test.go @@ -2470,6 +2470,43 @@ func TestRenameObjects(t *testing.T) { t.Fatal("unexpected path", obj.Name) } } + + // Assert directories are correct + expectedDirs := []struct { + id uint + parentID uint + name string + }{ + { + id: 1, + parentID: 0, + name: "", + }, + { + id: 2, + parentID: 1, + name: "fileś", + }, + } + var directories []dbDirectory + test.Retry(100, 100*time.Millisecond, func() error { + if err := ss.db.Find(&directories).Error; err != nil { + return err + } else if len(directories) != len(expectedDirs) { + return fmt.Errorf("unexpected number of directories, %v != %v", len(directories), len(expectedDirs)) + } + return nil + }) + + for i, dir := range directories { + if dir.ID != expectedDirs[i].id { + t.Fatalf("unexpected directory id, %v != %v", dir.ID, expectedDirs[i].id) + } else if dir.ParentID != expectedDirs[i].parentID { + t.Fatalf("unexpected directory parent id, %v != %v", dir.ParentID, expectedDirs[i].parentID) + } else if dir.Name != expectedDirs[i].name { + t.Fatalf("unexpected directory name, %v != %v", dir.Name, expectedDirs[i].name) + } + } } // TestObjectsStats is a unit test for ObjectsStats. 
From 4673b2b877ebf25a709cfbdb6c72e24008a78d3f Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Fri, 26 Apr 2024 15:50:45 +0200 Subject: [PATCH 25/52] stores: fix NDF in RenameObjectBlocking --- stores/metadata_test.go | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/stores/metadata_test.go b/stores/metadata_test.go index 185a40897..bf5c6f742 100644 --- a/stores/metadata_test.go +++ b/stores/metadata_test.go @@ -41,6 +41,22 @@ func (s *SQLStore) RemoveObjectsBlocking(ctx context.Context, bucket, prefix str return s.waitForPruneLoop(ts) } +func (s *SQLStore) RenameObjectBlocking(ctx context.Context, bucket, keyOld, keyNew string, force bool) error { + ts := time.Now() + if err := s.RenameObject(ctx, bucket, keyOld, keyNew, force); err != nil { + return err + } + return s.waitForPruneLoop(ts) +} + +func (s *SQLStore) RenameObjectsBlocking(ctx context.Context, bucket, prefixOld, prefixNew string, force bool) error { + ts := time.Now() + if err := s.RenameObjects(ctx, bucket, prefixOld, prefixNew, force); err != nil { + return err + } + return s.waitForPruneLoop(ts) +} + func (s *SQLStore) UpdateObjectBlocking(ctx context.Context, bucket, path, contractSet, eTag, mimeType string, metadata api.ObjectUserMetadata, o object.Object) error { var ts time.Time _, err := s.Object(ctx, bucket, path) @@ -3282,7 +3298,7 @@ func TestBucketObjects(t *testing.T) { } // Rename object foo/bar in bucket 1 to foo/baz but not in bucket 2. - if err := ss.RenameObject(context.Background(), b1, "/foo/bar", "/foo/baz", false); err != nil { + if err := ss.RenameObjectBlocking(context.Background(), b1, "/foo/bar", "/foo/baz", false); err != nil { t.Fatal(err) } else if entries, _, err := ss.ObjectEntries(context.Background(), b1, "/foo/", "", "", "", "", 0, -1); err != nil { t.Fatal(err) @@ -3299,7 +3315,7 @@ func TestBucketObjects(t *testing.T) { } // Rename foo/bar in bucket 2 using the batch rename.
- if err := ss.RenameObjects(context.Background(), b2, "/foo/bar", "/foo/bam", false); err != nil { + if err := ss.RenameObjectsBlocking(context.Background(), b2, "/foo/bar", "/foo/bam", false); err != nil { t.Fatal(err) } else if entries, _, err := ss.ObjectEntries(context.Background(), b1, "/foo/", "", "", "", "", 0, -1); err != nil { t.Fatal(err) From d04ea6b659c8d1cab87b8f5d47751fb7ded8cec0 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Mon, 29 Apr 2024 14:55:47 +0200 Subject: [PATCH 26/52] stores: recursive solution --- stores/metadata.go | 68 ++++++++++++++++++++++++---------------------- 1 file changed, 36 insertions(+), 32 deletions(-) diff --git a/stores/metadata.go b/stores/metadata.go index 3cfee1ada..a456a7432 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -1234,57 +1234,61 @@ func (s *SQLStore) ObjectEntries(ctx context.Context, bucket, path, prefix, sort prefixExpr = "SUBSTR(o.object_id, 1, ?) = ?" } - lengthFn := "CHAR_LENGTH" - if isSQLite(s.db) { - lengthFn = "LENGTH" - } - // objectsQuery consists of 2 parts // 1. fetch all objects in requested directory // 2. fetch all sub-directories objectsQuery := fmt.Sprintf(` +WITH RECURSIVE subdirectories AS ( + SELECT id, parent_id, name, id as root_id + FROM directories + WHERE parent_id = ? + UNION ALL + SELECT d.id, d.parent_id, d.name, root_id + FROM directories d + INNER JOIN subdirectories sd ON sd.id = d.parent_id +) SELECT o.etag as ETag, o.created_at as ModTime, o.object_id as ObjectName, o.size as Size, o.health as Health, o.mime_type as MimeType FROM objects o INNER JOIN buckets b ON o.db_bucket_id = b.id WHERE o.object_id != ? AND o.db_directory_id = ? AND b.name = ? 
AND %s -UNION -SELECT '' as ETag, MAX(o.created_at) as ModTime, %s as ObjectName, SUM(o.size) as Size, MIN(o.health) as Health, '' as MimeType -FROM objects o -INNER JOIN buckets b ON o.db_bucket_id = b.id -INNER JOIN directories d ON o.object_id LIKE %s AND SUBSTR(o.object_id, 1, %s(%s)) = %s AND %s -WHERE b.name = ? AND d.parent_id = ? -GROUP BY d.id +UNION ALL +SELECT '' as ETag, ModTime, %s as ObjectName, Size, Health, '' as MimeType +FROM directories outer_dirs +INNER JOIN ( + SELECT + d.root_id, + MAX(o.created_at) as ModTime, + SUM(o.size) as Size, + MIN(o.health) as Health + FROM objects o + INNER JOIN buckets b ON o.db_bucket_id = b.id + INNER JOIN subdirectories d ON d.id = o.db_directory_id + WHERE b.name = ? AND %s + GROUP BY d.root_id +) as aggregated ON outer_dirs.id = aggregated.root_id +WHERE outer_dirs.parent_id = ? `, prefixExpr, - sqlConcat(s.db, sqlConcat(s.db, "?", "d.name"), "'/'"), - sqlConcat(s.db, sqlConcat(s.db, "?", "d.name"), "'/%'"), - lengthFn, - sqlConcat(s.db, sqlConcat(s.db, "?", "d.name"), "'/'"), - sqlConcat(s.db, sqlConcat(s.db, "?", "d.name"), "'/'"), - prefixExpr) + sqlConcat(s.db, sqlConcat(s.db, "?", "outer_dirs.name"), "'/'"), + prefixExpr, + ) // build query params var objectsQueryParams []interface{} if prefix != "" { objectsQueryParams = []interface{}{ + dirID, + path, dirID, bucket, utf8.RuneCountInString(path + prefix), path + prefix, path, - dirID, bucket, - utf8.RuneCountInString(path + prefix), path + prefix, - path, - path, - path, - path, - utf8.RuneCountInString(path + prefix), path + prefix, - bucket, dirID, + bucket, utf8.RuneCountInString(path + prefix), path + prefix, + dirID, } } else { objectsQueryParams = []interface{}{ + dirID, + path, dirID, bucket, path, - dirID, bucket, - path, - path, - path, - path, - bucket, dirID, + bucket, + dirID, } } From e768f1306cde66bb4324ded9564578a8f2ec1c4a Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Mon, 29 Apr 2024 17:13:00 +0200 Subject: [PATCH 27/52] Revert 
"stores: recursive solution" This reverts commit d04ea6b659c8d1cab87b8f5d47751fb7ded8cec0. --- stores/metadata.go | 68 ++++++++++++++++++++++------------------------ 1 file changed, 32 insertions(+), 36 deletions(-) diff --git a/stores/metadata.go b/stores/metadata.go index a456a7432..3cfee1ada 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -1234,61 +1234,57 @@ func (s *SQLStore) ObjectEntries(ctx context.Context, bucket, path, prefix, sort prefixExpr = "SUBSTR(o.object_id, 1, ?) = ?" } + lengthFn := "CHAR_LENGTH" + if isSQLite(s.db) { + lengthFn = "LENGTH" + } + // objectsQuery consists of 2 parts // 1. fetch all objects in requested directory // 2. fetch all sub-directories objectsQuery := fmt.Sprintf(` -WITH RECURSIVE subdirectories AS ( - SELECT id, parent_id, name, id as root_id - FROM directories - WHERE parent_id = ? - UNION ALL - SELECT d.id, d.parent_id, d.name, root_id - FROM directories d - INNER JOIN subdirectories sd ON sd.id = d.parent_id -) SELECT o.etag as ETag, o.created_at as ModTime, o.object_id as ObjectName, o.size as Size, o.health as Health, o.mime_type as MimeType FROM objects o INNER JOIN buckets b ON o.db_bucket_id = b.id WHERE o.object_id != ? AND o.db_directory_id = ? AND b.name = ? AND %s -UNION ALL -SELECT '' as ETag, ModTime, %s as ObjectName, Size, Health, '' as MimeType -FROM directories outer_dirs -INNER JOIN ( - SELECT - d.root_id, - MAX(o.created_at) as ModTime, - SUM(o.size) as Size, - MIN(o.health) as Health - FROM objects o - INNER JOIN buckets b ON o.db_bucket_id = b.id - INNER JOIN subdirectories d ON d.id = o.db_directory_id - WHERE b.name = ? AND %s - GROUP BY d.root_id -) as aggregated ON outer_dirs.id = aggregated.root_id -WHERE outer_dirs.parent_id = ? 
+UNION +SELECT '' as ETag, MAX(o.created_at) as ModTime, %s as ObjectName, SUM(o.size) as Size, MIN(o.health) as Health, '' as MimeType +FROM objects o +INNER JOIN buckets b ON o.db_bucket_id = b.id +INNER JOIN directories d ON o.object_id LIKE %s AND SUBSTR(o.object_id, 1, %s(%s)) = %s AND %s +WHERE b.name = ? AND d.parent_id = ? +GROUP BY d.id `, prefixExpr, - sqlConcat(s.db, sqlConcat(s.db, "?", "outer_dirs.name"), "'/'"), - prefixExpr, - ) + sqlConcat(s.db, sqlConcat(s.db, "?", "d.name"), "'/'"), + sqlConcat(s.db, sqlConcat(s.db, "?", "d.name"), "'/%'"), + lengthFn, + sqlConcat(s.db, sqlConcat(s.db, "?", "d.name"), "'/'"), + sqlConcat(s.db, sqlConcat(s.db, "?", "d.name"), "'/'"), + prefixExpr) // build query params var objectsQueryParams []interface{} if prefix != "" { objectsQueryParams = []interface{}{ - dirID, - path, dirID, bucket, utf8.RuneCountInString(path + prefix), path + prefix, path, - bucket, utf8.RuneCountInString(path + prefix), path + prefix, - dirID, + dirID, bucket, + utf8.RuneCountInString(path + prefix), path + prefix, + path, + path, + path, + path, + utf8.RuneCountInString(path + prefix), path + prefix, + bucket, dirID, } } else { objectsQueryParams = []interface{}{ - dirID, - path, dirID, bucket, path, - bucket, - dirID, + dirID, bucket, + path, + path, + path, + path, + bucket, dirID, } } From 2f1f427d3d1429c301e3dcff12ab180b4345c3cf Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Mon, 29 Apr 2024 17:13:12 +0200 Subject: [PATCH 28/52] stores: add migration files --- .../mysql/main/migration_00008_directories_1.sql | 16 ++++++++++++++++ .../mysql/main/migration_00008_directories_2.sql | 6 ++++++ 2 files changed, 22 insertions(+) create mode 100644 stores/migrations/mysql/main/migration_00008_directories_1.sql create mode 100644 stores/migrations/mysql/main/migration_00008_directories_2.sql diff --git a/stores/migrations/mysql/main/migration_00008_directories_1.sql b/stores/migrations/mysql/main/migration_00008_directories_1.sql new 
file mode 100644 index 000000000..acf80d35b --- /dev/null +++ b/stores/migrations/mysql/main/migration_00008_directories_1.sql @@ -0,0 +1,16 @@ +-- dbDirectory +CREATE TABLE IF NOT EXISTS `directories` ( + `id` bigint unsigned NOT NULL AUTO_INCREMENT, + `created_at` datetime(3) DEFAULT NULL, + `parent_id` bigint unsigned, + `name` varchar(766) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL, + PRIMARY KEY (`id`), + KEY `idx_directories_parent_id` (`parent_id`), + UNIQUE KEY `idx_directories_name_parent_id` (`name`, `parent_id`), + CONSTRAINT `fk_directories_db_directories` FOREIGN KEY (`parent_id`) REFERENCES `directories` (`id`) ON DELETE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; + +-- dbObject: add column and constraint +ALTER TABLE `objects` +ADD COLUMN `db_directory_id` bigint unsigned, +ADD CONSTRAINT `fk_objects_db_directory_id` FOREIGN KEY (`db_directory_id`) REFERENCES `directories` (`id`); \ No newline at end of file diff --git a/stores/migrations/mysql/main/migration_00008_directories_2.sql b/stores/migrations/mysql/main/migration_00008_directories_2.sql new file mode 100644 index 000000000..2ca36a429 --- /dev/null +++ b/stores/migrations/mysql/main/migration_00008_directories_2.sql @@ -0,0 +1,6 @@ +-- Add NOT NULL to column +ALTER TABLE `objects` +MODIFY COLUMN `db_directory_id` bigint unsigned NOT NULL; + +-- Create index +CREATE INDEX `idx_objects_db_directory_id` ON `objects` (`db_directory_id`); \ No newline at end of file From 75fcc624d8603750cb51c740ef67c4ad4f3f6fc1 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Mon, 29 Apr 2024 17:29:43 +0200 Subject: [PATCH 29/52] Revert "Revert "stores: recursive solution"" This reverts commit e768f1306cde66bb4324ded9564578a8f2ec1c4a. 
--- stores/metadata.go | 68 ++++++++++++++++++++++++---------------------- 1 file changed, 36 insertions(+), 32 deletions(-) diff --git a/stores/metadata.go b/stores/metadata.go index 3cfee1ada..a456a7432 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -1234,57 +1234,61 @@ func (s *SQLStore) ObjectEntries(ctx context.Context, bucket, path, prefix, sort prefixExpr = "SUBSTR(o.object_id, 1, ?) = ?" } - lengthFn := "CHAR_LENGTH" - if isSQLite(s.db) { - lengthFn = "LENGTH" - } - // objectsQuery consists of 2 parts // 1. fetch all objects in requested directory // 2. fetch all sub-directories objectsQuery := fmt.Sprintf(` +WITH RECURSIVE subdirectories AS ( + SELECT id, parent_id, name, id as root_id + FROM directories + WHERE parent_id = ? + UNION ALL + SELECT d.id, d.parent_id, d.name, root_id + FROM directories d + INNER JOIN subdirectories sd ON sd.id = d.parent_id +) SELECT o.etag as ETag, o.created_at as ModTime, o.object_id as ObjectName, o.size as Size, o.health as Health, o.mime_type as MimeType FROM objects o INNER JOIN buckets b ON o.db_bucket_id = b.id WHERE o.object_id != ? AND o.db_directory_id = ? AND b.name = ? AND %s -UNION -SELECT '' as ETag, MAX(o.created_at) as ModTime, %s as ObjectName, SUM(o.size) as Size, MIN(o.health) as Health, '' as MimeType -FROM objects o -INNER JOIN buckets b ON o.db_bucket_id = b.id -INNER JOIN directories d ON o.object_id LIKE %s AND SUBSTR(o.object_id, 1, %s(%s)) = %s AND %s -WHERE b.name = ? AND d.parent_id = ? -GROUP BY d.id +UNION ALL +SELECT '' as ETag, ModTime, %s as ObjectName, Size, Health, '' as MimeType +FROM directories outer_dirs +INNER JOIN ( + SELECT + d.root_id, + MAX(o.created_at) as ModTime, + SUM(o.size) as Size, + MIN(o.health) as Health + FROM objects o + INNER JOIN buckets b ON o.db_bucket_id = b.id + INNER JOIN subdirectories d ON d.id = o.db_directory_id + WHERE b.name = ? AND %s + GROUP BY d.root_id +) as aggregated ON outer_dirs.id = aggregated.root_id +WHERE outer_dirs.parent_id = ? 
`, prefixExpr, - sqlConcat(s.db, sqlConcat(s.db, "?", "d.name"), "'/'"), - sqlConcat(s.db, sqlConcat(s.db, "?", "d.name"), "'/%'"), - lengthFn, - sqlConcat(s.db, sqlConcat(s.db, "?", "d.name"), "'/'"), - sqlConcat(s.db, sqlConcat(s.db, "?", "d.name"), "'/'"), - prefixExpr) + sqlConcat(s.db, sqlConcat(s.db, "?", "outer_dirs.name"), "'/'"), + prefixExpr, + ) // build query params var objectsQueryParams []interface{} if prefix != "" { objectsQueryParams = []interface{}{ + dirID, + path, dirID, bucket, utf8.RuneCountInString(path + prefix), path + prefix, path, - dirID, bucket, - utf8.RuneCountInString(path + prefix), path + prefix, - path, - path, - path, - path, - utf8.RuneCountInString(path + prefix), path + prefix, - bucket, dirID, + bucket, utf8.RuneCountInString(path + prefix), path + prefix, + dirID, } } else { objectsQueryParams = []interface{}{ + dirID, + path, dirID, bucket, path, - dirID, bucket, - path, - path, - path, - path, - bucket, dirID, + bucket, + dirID, } } From 2f745dfb960dd91edbcf229daa527a1ba5b93ac3 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Mon, 29 Apr 2024 17:57:05 +0200 Subject: [PATCH 30/52] stores: pre-fetch bucket --- stores/metadata.go | 26 +++++++++++++++++--------- 1 file changed, 17 insertions(+), 9 deletions(-) diff --git a/stores/metadata.go b/stores/metadata.go index a456a7432..e9a043321 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -1228,6 +1228,14 @@ func (s *SQLStore) ObjectEntries(ctx context.Context, bucket, path, prefix, sort return nil, false, err } + // fetch bucket id + var dBucket dbBucket + if err := s.db.Select("id"). + Where("name", bucket). 
+ Take(&dBucket).Error; err != nil { + return nil, false, fmt.Errorf("failed to fetch bucket id: %w", err) + } + // build prefix expression prefixExpr := "TRUE" if prefix != "" { @@ -1249,8 +1257,7 @@ WITH RECURSIVE subdirectories AS ( ) SELECT o.etag as ETag, o.created_at as ModTime, o.object_id as ObjectName, o.size as Size, o.health as Health, o.mime_type as MimeType FROM objects o -INNER JOIN buckets b ON o.db_bucket_id = b.id -WHERE o.object_id != ? AND o.db_directory_id = ? AND b.name = ? AND %s +WHERE o.object_id != ? AND o.db_directory_id = ? AND o.db_bucket_id = ? AND %s UNION ALL SELECT '' as ETag, ModTime, %s as ObjectName, Size, Health, '' as MimeType FROM directories outer_dirs @@ -1261,9 +1268,8 @@ INNER JOIN ( SUM(o.size) as Size, MIN(o.health) as Health FROM objects o - INNER JOIN buckets b ON o.db_bucket_id = b.id INNER JOIN subdirectories d ON d.id = o.db_directory_id - WHERE b.name = ? AND %s + WHERE o.db_bucket_id = ? AND %s GROUP BY d.root_id ) as aggregated ON outer_dirs.id = aggregated.root_id WHERE outer_dirs.parent_id = ? @@ -1277,17 +1283,17 @@ WHERE outer_dirs.parent_id = ? if prefix != "" { objectsQueryParams = []interface{}{ dirID, - path, dirID, bucket, utf8.RuneCountInString(path + prefix), path + prefix, + path, dirID, dBucket.ID, utf8.RuneCountInString(path + prefix), path + prefix, path, - bucket, utf8.RuneCountInString(path + prefix), path + prefix, + dBucket.ID, utf8.RuneCountInString(path + prefix), path + prefix, dirID, } } else { objectsQueryParams = []interface{}{ dirID, - path, dirID, bucket, + path, dirID, dBucket.ID, path, - bucket, + dBucket.ID, dirID, } } @@ -3065,7 +3071,9 @@ func (s *SQLStore) ListObjects(ctx context.Context, bucket, prefix, sortBy, sort Model(&dbObject{}). Table("objects o"). Joins("INNER JOIN buckets b ON o.db_bucket_id = b.id"). - Where("b.name = ? AND ? AND ?", bucket, prefixExpr, markerExpr). + Where("o.db_bucket_id = (SELECT id FROM buckets b WHERE b.name = ?)", bucket). + Where("?", prefixExpr). 
+ Where("?", markerExpr). Order(orderBy). Order(markerOrderBy). Order("ObjectName ASC"). From bff67dd2d2323fd4c9893ab347918e241504b1f8 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 30 Apr 2024 10:38:35 +0200 Subject: [PATCH 31/52] stores: use full paths for dirs --- stores/metadata.go | 71 +++++++++---------- stores/metadata_test.go | 15 ++-- .../main/migration_00008_directories_1.sql | 16 +++++ .../main/migration_00008_directories_2.sql | 6 ++ stores/migrations/mysql/main/schema.sql | 2 +- stores/migrations/sqlite/main/schema.sql | 2 +- 6 files changed, 61 insertions(+), 51 deletions(-) create mode 100644 stores/migrations/mysql/main/migration_00008_directories_1.sql create mode 100644 stores/migrations/mysql/main/migration_00008_directories_2.sql diff --git a/stores/metadata.go b/stores/metadata.go index 3cfee1ada..ae4cb009c 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -1245,46 +1245,32 @@ func (s *SQLStore) ObjectEntries(ctx context.Context, bucket, path, prefix, sort objectsQuery := fmt.Sprintf(` SELECT o.etag as ETag, o.created_at as ModTime, o.object_id as ObjectName, o.size as Size, o.health as Health, o.mime_type as MimeType FROM objects o -INNER JOIN buckets b ON o.db_bucket_id = b.id -WHERE o.object_id != ? AND o.db_directory_id = ? AND b.name = ? AND %s +WHERE o.object_id != ? AND o.db_directory_id = ? AND o.db_bucket_id = (SELECT id FROM buckets b WHERE b.name = ?) AND %s UNION -SELECT '' as ETag, MAX(o.created_at) as ModTime, %s as ObjectName, SUM(o.size) as Size, MIN(o.health) as Health, '' as MimeType +SELECT '' as ETag, MAX(o.created_at) as ModTime, d.name as ObjectName, SUM(o.size) as Size, MIN(o.health) as Health, '' as MimeType FROM objects o -INNER JOIN buckets b ON o.db_bucket_id = b.id -INNER JOIN directories d ON o.object_id LIKE %s AND SUBSTR(o.object_id, 1, %s(%s)) = %s AND %s -WHERE b.name = ? AND d.parent_id = ? 
+INNER JOIN directories d ON SUBSTR(o.object_id, 1, %s(d.name)) = d.name AND %s +WHERE (SELECT id FROM buckets b WHERE b.name = ?) AND d.parent_id = ? GROUP BY d.id - `, prefixExpr, - sqlConcat(s.db, sqlConcat(s.db, "?", "d.name"), "'/'"), - sqlConcat(s.db, sqlConcat(s.db, "?", "d.name"), "'/%'"), +`, prefixExpr, lengthFn, - sqlConcat(s.db, sqlConcat(s.db, "?", "d.name"), "'/'"), - sqlConcat(s.db, sqlConcat(s.db, "?", "d.name"), "'/'"), prefixExpr) // build query params var objectsQueryParams []interface{} if prefix != "" { objectsQueryParams = []interface{}{ - path, - dirID, bucket, + path, // o.object_id != ? + dirID, bucket, // o.db_directory_id = ? AND b.name = ? utf8.RuneCountInString(path + prefix), path + prefix, - path, - path, - path, - path, utf8.RuneCountInString(path + prefix), path + prefix, - bucket, dirID, + bucket, dirID, // b.name = ? AND d.parent_id = ? } } else { objectsQueryParams = []interface{}{ - path, - dirID, bucket, - path, - path, - path, - path, - bucket, dirID, + path, // o.object_id != ? + dirID, bucket, // o.db_directory_id = ? AND b.name = ? + bucket, dirID, // b.name = ? AND d.parent_id = ? } } @@ -1768,14 +1754,9 @@ func (s *SQLStore) dirID(tx *gorm.DB, dirPath string) (uint, error) { dirID := uint(1) if dirPath == "/" { return dirID, nil // root dir returned - } - - splitPath := strings.Split(dirPath[1:len(dirPath)-1], "/") - for _, dir := range splitPath { - if err := tx.Raw("SELECT id FROM directories WHERE name = ? AND parent_id = ?", dir, dirID). - Scan(&dirID).Error; err != nil { - return 0, fmt.Errorf("failed to fetch root directory: %w", err) - } + } else if err := tx.Raw("SELECT id FROM directories WHERE name = ?", dirPath). 
+ Scan(&dirID).Error; err != nil { + return 0, fmt.Errorf("failed to fetch root directory: %w", err) } return dirID, nil } @@ -1787,15 +1768,25 @@ func makeDirsForPath(tx *gorm.DB, path string) (uint, error) { Clauses(clause.OnConflict{ DoNothing: true, }).Create(map[string]any{ - "id": dirID, + "id": dirID, + "name": "/", }).Error; err != nil { return 0, fmt.Errorf("failed to create root directory: %w", err) } // Create remaining directories. - path = strings.TrimPrefix(path, "/") - splitPath := strings.Split(path, "/") - for _, dir := range splitPath[:len(splitPath)-1] { + path = strings.TrimSuffix(path, "/") + if path == "/" { + return dirID, nil + } + for i := 0; i < utf8.RuneCountInString(path); i++ { + if path[i] != '/' { + continue + } + dir := path[:i+1] + if dir == "/" { + continue + } if err := tx.Clauses(clause.OnConflict{ DoNothing: true, }). @@ -1806,7 +1797,7 @@ func makeDirsForPath(tx *gorm.DB, path string) (uint, error) { return 0, fmt.Errorf("failed to create directory %v: %w", dir, err) } var childID uint - if err := tx.Raw("SELECT id FROM directories WHERE name = ? AND parent_id = ?", dir, dirID). + if err := tx.Raw("SELECT id FROM directories WHERE name = ?", dir). Scan(&childID).Error; err != nil { return 0, fmt.Errorf("failed to fetch directory id %v: %w", dir, err) } else if childID == 0 { @@ -3061,7 +3052,9 @@ func (s *SQLStore) ListObjects(ctx context.Context, bucket, prefix, sortBy, sort Model(&dbObject{}). Table("objects o"). Joins("INNER JOIN buckets b ON o.db_bucket_id = b.id"). - Where("b.name = ? AND ? AND ?", bucket, prefixExpr, markerExpr). + Where("b.name = (SELECT id FROM buckets b WHERE b.name = ?)", bucket). + Where("?", prefixExpr). + Where("?", markerExpr). Order(orderBy). Order(markerOrderBy). Order("ObjectName ASC"). 
diff --git a/stores/metadata_test.go b/stores/metadata_test.go index bf5c6f742..5efc098f0 100644 --- a/stores/metadata_test.go +++ b/stores/metadata_test.go @@ -4855,35 +4855,30 @@ func TestDirectories(t *testing.T) { parentID uint }{ { - name: "", + name: "/", id: 1, parentID: 0, }, { - name: "bar", + name: "/bar/", id: 2, parentID: 1, }, { - name: "", + name: "//", id: 3, parentID: 1, }, { - name: "", + name: "///", id: 4, parentID: 3, }, { - name: "dir", + name: "/dir/", id: 2, parentID: 1, }, - { - name: "fakedir", - id: 4, - parentID: 3, - }, } var dbDirs []dbDirectory diff --git a/stores/migrations/mysql/main/migration_00008_directories_1.sql b/stores/migrations/mysql/main/migration_00008_directories_1.sql new file mode 100644 index 000000000..acf80d35b --- /dev/null +++ b/stores/migrations/mysql/main/migration_00008_directories_1.sql @@ -0,0 +1,16 @@ +-- dbDirectory +CREATE TABLE IF NOT EXISTS `directories` ( + `id` bigint unsigned NOT NULL AUTO_INCREMENT, + `created_at` datetime(3) DEFAULT NULL, + `parent_id` bigint unsigned, + `name` varchar(766) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL, + PRIMARY KEY (`id`), + KEY `idx_directories_parent_id` (`parent_id`), + UNIQUE KEY `idx_directories_name_parent_id` (`name`, `parent_id`), + CONSTRAINT `fk_directories_db_directories` FOREIGN KEY (`parent_id`) REFERENCES `directories` (`id`) ON DELETE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; + +-- dbObject: add column and constraint +ALTER TABLE `objects` +ADD COLUMN `db_directory_id` bigint unsigned, +ADD CONSTRAINT `fk_objects_db_directory_id` FOREIGN KEY (`db_directory_id`) REFERENCES `directories` (`id`); \ No newline at end of file diff --git a/stores/migrations/mysql/main/migration_00008_directories_2.sql b/stores/migrations/mysql/main/migration_00008_directories_2.sql new file mode 100644 index 000000000..2ca36a429 --- /dev/null +++ b/stores/migrations/mysql/main/migration_00008_directories_2.sql @@ -0,0 +1,6 @@ +-- 
Add NOT NULL to column +ALTER TABLE `objects` +MODIFY COLUMN `db_directory_id` bigint unsigned NOT NULL; + +-- Create index +CREATE INDEX `idx_objects_db_directory_id` ON `objects` (`db_directory_id`); \ No newline at end of file diff --git a/stores/migrations/mysql/main/schema.sql b/stores/migrations/mysql/main/schema.sql index 69f2542ba..be8d72a9c 100644 --- a/stores/migrations/mysql/main/schema.sql +++ b/stores/migrations/mysql/main/schema.sql @@ -321,7 +321,7 @@ CREATE TABLE `directories` ( `name` varchar(766) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL, PRIMARY KEY (`id`), KEY `idx_directories_parent_id` (`parent_id`), - UNIQUE KEY `idx_directories_name_parent_id` (`name`, `parent_id`), + UNIQUE KEY `idx_directories_name` (`name`), CONSTRAINT `fk_directories_db_directories` FOREIGN KEY (`parent_id`) REFERENCES `directories` (`id`) ON DELETE CASCADE ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; diff --git a/stores/migrations/sqlite/main/schema.sql b/stores/migrations/sqlite/main/schema.sql index b57a600d1..1519661f3 100644 --- a/stores/migrations/sqlite/main/schema.sql +++ b/stores/migrations/sqlite/main/schema.sql @@ -47,7 +47,7 @@ CREATE INDEX `idx_buckets_name` ON `buckets`(`name`); -- dbDirectory CREATE TABLE `directories` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`parent_id` integer,`name` text, CONSTRAINT `fk_directories_db_directories` FOREIGN KEY (`parent_id`) REFERENCES `directories`(`id`)); CREATE INDEX `idx_directories_parent_id` ON `directories`(`parent_id`); -CREATE UNIQUE INDEX `idx_directories_name_parent_id` ON `directories`(`name`, `parent_id`); +CREATE UNIQUE INDEX `idx_directories_name` ON `directories`(`name`); -- dbObject CREATE TABLE `objects` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`db_bucket_id` integer NOT NULL, `db_directory_id` integer, `object_id` text,`key` blob,`health` real NOT NULL DEFAULT 1,`size` integer,`mime_type` text,`etag` text,CONSTRAINT 
`fk_objects_db_bucket` FOREIGN KEY (`db_bucket_id`) REFERENCES `buckets`(`id`),CONSTRAINT `fk_objects_db_directories` FOREIGN KEY (`db_directory_id`) REFERENCES `directories`(`id`)); From a4db91cee9d4efd0db577165eeffd6dedae41148 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 30 Apr 2024 10:50:17 +0200 Subject: [PATCH 32/52] stores: fix TestRenameObjects --- stores/metadata_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stores/metadata_test.go b/stores/metadata_test.go index 5efc098f0..f5e012f10 100644 --- a/stores/metadata_test.go +++ b/stores/metadata_test.go @@ -2496,12 +2496,12 @@ func TestRenameObjects(t *testing.T) { { id: 1, parentID: 0, - name: "", + name: "/", }, { id: 2, parentID: 1, - name: "fileś", + name: "/fileś/", }, } var directories []dbDirectory From 25ae076ee941fd1cb395d44942dc8552afa22ff1 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 30 Apr 2024 11:09:33 +0200 Subject: [PATCH 33/52] e2e: fix TestObjectEntries --- internal/test/e2e/cluster_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/test/e2e/cluster_test.go b/internal/test/e2e/cluster_test.go index 16a335265..1a1f1200a 100644 --- a/internal/test/e2e/cluster_test.go +++ b/internal/test/e2e/cluster_test.go @@ -251,7 +251,7 @@ func TestObjectEntries(t *testing.T) { entries[i].ModTime = api.TimeRFC3339{} // assert mime type - isDir := strings.HasSuffix(entries[i].Name, "/") + isDir := strings.HasSuffix(entries[i].Name, "/") && entries[i].Name != "//double/" // double is a 'fake' dir if (isDir && entries[i].MimeType != "") || (!isDir && entries[i].MimeType == "") { t.Fatal("unexpected mime type", entries[i].MimeType) } From f810b867a8ba3ba2512f4185bf693bc2a8d1bb46 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 30 Apr 2024 11:51:30 +0200 Subject: [PATCH 34/52] e2e: fix TestUploadDownloadExtended --- stores/metadata.go | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff 
--git a/stores/metadata.go b/stores/metadata.go index fb02cf374..8806fd765 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -1224,7 +1224,9 @@ func (s *SQLStore) ObjectEntries(ctx context.Context, bucket, path, prefix, sort // fetch id of directory to query dirID, err := s.dirID(s.db, path) - if err != nil { + if errors.Is(err, gorm.ErrRecordNotFound) { + return []api.ObjectMetadata{}, false, nil + } else if err != nil { return nil, false, err } @@ -1759,14 +1761,18 @@ func (s *SQLStore) dirID(tx *gorm.DB, dirPath string) (uint, error) { return 0, fmt.Errorf("path must end with /") } - dirID := uint(1) if dirPath == "/" { - return dirID, nil // root dir returned - } else if err := tx.Raw("SELECT id FROM directories WHERE name = ?", dirPath). - Scan(&dirID).Error; err != nil { - return 0, fmt.Errorf("failed to fetch root directory: %w", err) + return 1, nil // root dir returned } - return dirID, nil + + var dir dbDirectory + if err := tx.Where("name", dirPath). + Select("id"). + Take(&dir). 
+ Error; err != nil { + return 0, fmt.Errorf("failed to fetch directory: %w", err) + } + return dir.ID, nil } func makeDirsForPath(tx *gorm.DB, path string) (uint, error) { From ec3802f38f9e15631e96107b837f0e6c6916c688 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 30 Apr 2024 15:49:45 +0200 Subject: [PATCH 35/52] stores: fix index creation in mysql migration --- stores/migrations/mysql/main/migration_00008_directories.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stores/migrations/mysql/main/migration_00008_directories.sql b/stores/migrations/mysql/main/migration_00008_directories.sql index 5e7114976..0f91e6360 100644 --- a/stores/migrations/mysql/main/migration_00008_directories.sql +++ b/stores/migrations/mysql/main/migration_00008_directories.sql @@ -6,7 +6,7 @@ CREATE TABLE `directories` ( `name` varchar(766) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL, PRIMARY KEY (`id`), KEY `idx_directories_parent_id` (`parent_id`), - UNIQUE KEY `idx_directories_name_parent_id` (`name`, `parent_id`), + UNIQUE KEY `idx_directories_name` (`name`), CONSTRAINT `fk_directories_db_directories` FOREIGN KEY (`parent_id`) REFERENCES `directories` (`id`) ON DELETE CASCADE ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; From 68bc5459ba95cf27b8c79a40435a4dabeba4dde1 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Thu, 2 May 2024 11:39:19 +0200 Subject: [PATCH 36/52] stores: union all instead of union --- stores/metadata.go | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/stores/metadata.go b/stores/metadata.go index 8806fd765..ff6b58b1a 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -1256,11 +1256,14 @@ func (s *SQLStore) ObjectEntries(ctx context.Context, bucket, path, prefix, sort SELECT o.etag as ETag, o.created_at as ModTime, o.object_id as ObjectName, o.size as Size, o.health as Health, o.mime_type as MimeType FROM objects o WHERE o.object_id != ? 
AND o.db_directory_id = ? AND o.db_bucket_id = (SELECT id FROM buckets b WHERE b.name = ?) AND %s -UNION +UNION ALL SELECT '' as ETag, MAX(o.created_at) as ModTime, d.name as ObjectName, SUM(o.size) as Size, MIN(o.health) as Health, '' as MimeType FROM objects o INNER JOIN directories d ON SUBSTR(o.object_id, 1, %s(d.name)) = d.name AND %s -WHERE o.db_bucket_id = (SELECT id FROM buckets b WHERE b.name = ?) AND d.parent_id = ? +WHERE o.db_bucket_id = (SELECT id FROM buckets b WHERE b.name = ?) +AND o.object_id LIKE ? +AND SUBSTR(o.object_id, 1, ?) = ? +AND d.parent_id = ? GROUP BY d.id `, prefixExpr, lengthFn, @@ -1274,13 +1277,19 @@ GROUP BY d.id dirID, bucket, // o.db_directory_id = ? AND b.name = ? utf8.RuneCountInString(path + prefix), path + prefix, utf8.RuneCountInString(path + prefix), path + prefix, - bucket, dirID, // b.name = ? AND d.parent_id = ? + bucket, // b.name = ? + path + "%", // o.object_id LIKE ? + utf8.RuneCountInString(path), path, // SUBSTR(o.object_id, 1, ?) = ? + dirID, // d.parent_id = ? } } else { objectsQueryParams = []interface{}{ path, // o.object_id != ? dirID, bucket, // o.db_directory_id = ? AND b.name = ? - bucket, dirID, // b.name = ? AND d.parent_id = ? + bucket, // b.name = ? + path + "%", // o.object_id LIKE ? + utf8.RuneCountInString(path), path, // SUBSTR(o.object_id, 1, ?) = ? + dirID, // d.parent_id = ? 
} } @@ -1340,7 +1349,7 @@ GROUP BY d.id sortBy = "ObjectName" } orderByClause := fmt.Sprintf("%s %s", sortBy, sortDir) - if sortBy != api.ObjectSortByName { + if sortBy != "ObjectName" { orderByClause += ", ObjectName" } From 7ba50647a496a2e0c0af9c5dffa29c3dd09df530 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Fri, 3 May 2024 13:16:10 +0200 Subject: [PATCH 37/52] stores: only store dir name --- stores/metadata.go | 43 ++++++++++-------------- stores/migrations/mysql/main/schema.sql | 3 +- stores/migrations/sqlite/main/schema.sql | 2 +- 3 files changed, 20 insertions(+), 28 deletions(-) diff --git a/stores/metadata.go b/stores/metadata.go index ff6b58b1a..398caa18e 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -1257,16 +1257,18 @@ SELECT o.etag as ETag, o.created_at as ModTime, o.object_id as ObjectName, o.siz FROM objects o WHERE o.object_id != ? AND o.db_directory_id = ? AND o.db_bucket_id = (SELECT id FROM buckets b WHERE b.name = ?) AND %s UNION ALL -SELECT '' as ETag, MAX(o.created_at) as ModTime, d.name as ObjectName, SUM(o.size) as Size, MIN(o.health) as Health, '' as MimeType +SELECT '' as ETag, MAX(o.created_at) as ModTime, %s as ObjectName, SUM(o.size) as Size, MIN(o.health) as Health, '' as MimeType FROM objects o -INNER JOIN directories d ON SUBSTR(o.object_id, 1, %s(d.name)) = d.name AND %s +INNER JOIN directories d ON SUBSTR(o.object_id, 1, %s(d.name)) = %s AND %s WHERE o.db_bucket_id = (SELECT id FROM buckets b WHERE b.name = ?) AND o.object_id LIKE ? AND SUBSTR(o.object_id, 1, ?) = ? AND d.parent_id = ? GROUP BY d.id `, prefixExpr, + sqlConcat(s.db, sqlConcat(s.db, "?", "d.name"), "'/'"), lengthFn, + sqlConcat(s.db, sqlConcat(s.db, "?", "d.name"), "'/%'"), prefixExpr) // build query params @@ -1286,6 +1288,7 @@ GROUP BY d.id objectsQueryParams = []interface{}{ path, // o.object_id != ? dirID, bucket, // o.db_directory_id = ? AND b.name = ? + path, path, bucket, // b.name = ? path + "%", // o.object_id LIKE ? 
utf8.RuneCountInString(path), path, // SUBSTR(o.object_id, 1, ?) = ? @@ -1769,19 +1772,19 @@ func (s *SQLStore) dirID(tx *gorm.DB, dirPath string) (uint, error) { } else if !strings.HasSuffix(dirPath, "/") { return 0, fmt.Errorf("path must end with /") } - + dirID := uint(1) if dirPath == "/" { - return 1, nil // root dir returned + return dirID, nil // root dir returned } - var dir dbDirectory - if err := tx.Where("name", dirPath). - Select("id"). - Take(&dir). - Error; err != nil { - return 0, fmt.Errorf("failed to fetch directory: %w", err) + splitPath := strings.Split(dirPath[1:len(dirPath)-1], "/") + for _, dir := range splitPath { + if err := tx.Raw("SELECT id FROM directories WHERE name = ? AND parent_id = ?", dir, dirID). + Scan(&dirID).Error; err != nil { + return 0, fmt.Errorf("failed to fetch root directory: %w", err) + } } - return dir.ID, nil + return dirID, nil } func makeDirsForPath(tx *gorm.DB, path string) (uint, error) { @@ -1791,25 +1794,15 @@ func makeDirsForPath(tx *gorm.DB, path string) (uint, error) { Clauses(clause.OnConflict{ DoNothing: true, }).Create(map[string]any{ - "id": dirID, - "name": "/", + "id": dirID, }).Error; err != nil { return 0, fmt.Errorf("failed to create root directory: %w", err) } // Create remaining directories. - path = strings.TrimSuffix(path, "/") - if path == "/" { - return dirID, nil - } - for i := 0; i < utf8.RuneCountInString(path); i++ { - if path[i] != '/' { - continue - } - dir := path[:i+1] - if dir == "/" { - continue - } + path = strings.TrimPrefix(path, "/") + splitPath := strings.Split(path, "/") + for _, dir := range splitPath[:len(splitPath)-1] { if err := tx.Clauses(clause.OnConflict{ DoNothing: true, }). 
diff --git a/stores/migrations/mysql/main/schema.sql b/stores/migrations/mysql/main/schema.sql index be8d72a9c..4897f454d 100644 --- a/stores/migrations/mysql/main/schema.sql +++ b/stores/migrations/mysql/main/schema.sql @@ -320,8 +320,7 @@ CREATE TABLE `directories` ( `parent_id` bigint unsigned, `name` varchar(766) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL, PRIMARY KEY (`id`), - KEY `idx_directories_parent_id` (`parent_id`), - UNIQUE KEY `idx_directories_name` (`name`), + UNIQUE KEY `idx_directories_parent_id_name` (`parent_id`, `name`), CONSTRAINT `fk_directories_db_directories` FOREIGN KEY (`parent_id`) REFERENCES `directories` (`id`) ON DELETE CASCADE ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; diff --git a/stores/migrations/sqlite/main/schema.sql b/stores/migrations/sqlite/main/schema.sql index 1519661f3..8e66d9975 100644 --- a/stores/migrations/sqlite/main/schema.sql +++ b/stores/migrations/sqlite/main/schema.sql @@ -47,7 +47,7 @@ CREATE INDEX `idx_buckets_name` ON `buckets`(`name`); -- dbDirectory CREATE TABLE `directories` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`parent_id` integer,`name` text, CONSTRAINT `fk_directories_db_directories` FOREIGN KEY (`parent_id`) REFERENCES `directories`(`id`)); CREATE INDEX `idx_directories_parent_id` ON `directories`(`parent_id`); -CREATE UNIQUE INDEX `idx_directories_name` ON `directories`(`name`); +CREATE UNIQUE INDEX `idx_directories_parent_id_name` ON `directories`(`parent_id`, `name`); -- dbObject CREATE TABLE `objects` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`db_bucket_id` integer NOT NULL, `db_directory_id` integer, `object_id` text,`key` blob,`health` real NOT NULL DEFAULT 1,`size` integer,`mime_type` text,`etag` text,CONSTRAINT `fk_objects_db_bucket` FOREIGN KEY (`db_bucket_id`) REFERENCES `buckets`(`id`),CONSTRAINT `fk_objects_db_directories` FOREIGN KEY (`db_directory_id`) REFERENCES `directories`(`id`)); From 
44f0ad03855c9acd43e7242b84b11004f5719abc Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Fri, 3 May 2024 13:50:15 +0200 Subject: [PATCH 38/52] stores: sqlConcat variable params --- stores/metadata.go | 26 ++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/stores/metadata.go b/stores/metadata.go index 398caa18e..2282c614a 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -1259,7 +1259,7 @@ WHERE o.object_id != ? AND o.db_directory_id = ? AND o.db_bucket_id = (SELECT id UNION ALL SELECT '' as ETag, MAX(o.created_at) as ModTime, %s as ObjectName, SUM(o.size) as Size, MIN(o.health) as Health, '' as MimeType FROM objects o -INNER JOIN directories d ON SUBSTR(o.object_id, 1, %s(d.name)) = %s AND %s +INNER JOIN directories d ON SUBSTR(o.object_id, 1, %s(%s)) = %s AND %s WHERE o.db_bucket_id = (SELECT id FROM buckets b WHERE b.name = ?) AND o.object_id LIKE ? AND SUBSTR(o.object_id, 1, ?) = ? @@ -1268,7 +1268,8 @@ GROUP BY d.id `, prefixExpr, sqlConcat(s.db, sqlConcat(s.db, "?", "d.name"), "'/'"), lengthFn, - sqlConcat(s.db, sqlConcat(s.db, "?", "d.name"), "'/%'"), + sqlConcat(s.db, sqlConcat(s.db, "?", "d.name"), "'/'"), + sqlConcat(s.db, sqlConcat(s.db, "?", "d.name"), "'/'"), prefixExpr) // build query params @@ -1278,6 +1279,9 @@ GROUP BY d.id path, // o.object_id != ? dirID, bucket, // o.db_directory_id = ? AND b.name = ? utf8.RuneCountInString(path + prefix), path + prefix, + path, + path, + path, utf8.RuneCountInString(path + prefix), path + prefix, bucket, // b.name = ? path + "%", // o.object_id LIKE ? @@ -1288,7 +1292,7 @@ GROUP BY d.id objectsQueryParams = []interface{}{ path, // o.object_id != ? dirID, bucket, // o.db_directory_id = ? AND b.name = ? - path, path, + path, path, path, bucket, // b.name = ? path + "%", // o.object_id LIKE ? utf8.RuneCountInString(path), path, // SUBSTR(o.object_id, 1, ?) = ? 
@@ -3018,11 +3022,21 @@ func (s *SQLStore) invalidateSlabHealthByFCID(ctx context.Context, fcids []fileC } // nolint:unparam -func sqlConcat(db *gorm.DB, a, b string) string { +func sqlConcat(db *gorm.DB, s ...string) string { + if len(s) < 2 { + panic("sqlConcat: need at least two arguments") + } + query := s[0] if isSQLite(db) { - return fmt.Sprintf("%s || %s", a, b) + for i := 1; i < len(s); i++ { + query = fmt.Sprintf("%s || %s", query, s[i]) + } + return query + } + for i := 1; i < len(s); i++ { + query = fmt.Sprintf("%s, %s", query, s[i]) } - return fmt.Sprintf("CONCAT(%s, %s)", a, b) + return fmt.Sprintf("CONCAT(%s)", query) } func sqlRandomTimestamp(db *gorm.DB, now time.Time, min, max time.Duration) clause.Expr { From c8c65c59194b6847c487e8a2133d441d7019bfe2 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Fri, 3 May 2024 13:57:41 +0200 Subject: [PATCH 39/52] stores: fix TestDirectories and TestRenameObjects --- stores/metadata_test.go | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/stores/metadata_test.go b/stores/metadata_test.go index afd7b7bc7..372ba90e9 100644 --- a/stores/metadata_test.go +++ b/stores/metadata_test.go @@ -2497,12 +2497,12 @@ func TestRenameObjects(t *testing.T) { { id: 1, parentID: 0, - name: "/", + name: "", }, { id: 2, parentID: 1, - name: "/fileś/", + name: "fileś", }, } var directories []dbDirectory @@ -4855,30 +4855,35 @@ func TestDirectories(t *testing.T) { parentID uint }{ { - name: "/", + name: "", id: 1, parentID: 0, }, { - name: "/bar/", + name: "bar", id: 2, parentID: 1, }, { - name: "//", + name: "", id: 3, parentID: 1, }, { - name: "///", + name: "", id: 4, parentID: 3, }, { - name: "/dir/", + name: "dir", id: 2, parentID: 1, }, + { + name: "fakedir", + id: 5, + parentID: 2, + }, } var dbDirs []dbDirectory From 585a5fcab2617d7f915ffe1e19cab280d851d54c Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Fri, 3 May 2024 17:54:38 +0200 Subject: [PATCH 40/52] e2e: fix 
TestObjectEntries --- internal/test/e2e/cluster_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/test/e2e/cluster_test.go b/internal/test/e2e/cluster_test.go index 1a1f1200a..16a335265 100644 --- a/internal/test/e2e/cluster_test.go +++ b/internal/test/e2e/cluster_test.go @@ -251,7 +251,7 @@ func TestObjectEntries(t *testing.T) { entries[i].ModTime = api.TimeRFC3339{} // assert mime type - isDir := strings.HasSuffix(entries[i].Name, "/") && entries[i].Name != "//double/" // double is a 'fake' dir + isDir := strings.HasSuffix(entries[i].Name, "/") if (isDir && entries[i].MimeType != "") || (!isDir && entries[i].MimeType == "") { t.Fatal("unexpected mime type", entries[i].MimeType) } From 7a5b2d50d134a0d3062c916bb42d2b261ce02642 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Fri, 3 May 2024 18:08:34 +0200 Subject: [PATCH 41/52] stores: address comments --- stores/metadata.go | 18 +++++++++--------- stores/metadata_test.go | 4 ++-- .../mysql/main/migration_00008_directories.sql | 16 ---------------- .../main/migration_00008_directories_1.sql | 7 +++---- .../main/migration_00008_directories_2.sql | 5 +---- stores/migrations/mysql/main/schema.sql | 6 +++--- stores/migrations/sqlite/main/schema.sql | 6 +++--- 7 files changed, 21 insertions(+), 41 deletions(-) delete mode 100644 stores/migrations/mysql/main/migration_00008_directories.sql diff --git a/stores/metadata.go b/stores/metadata.go index 2282c614a..0aa9a5394 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -117,8 +117,8 @@ type ( dbDirectory struct { Model - Name string - ParentID uint + Name string + DBParentID uint } dbObject struct { @@ -1263,7 +1263,7 @@ INNER JOIN directories d ON SUBSTR(o.object_id, 1, %s(%s)) = %s AND %s WHERE o.db_bucket_id = (SELECT id FROM buckets b WHERE b.name = ?) AND o.object_id LIKE ? AND SUBSTR(o.object_id, 1, ?) = ? -AND d.parent_id = ? +AND d.db_parent_id = ? 
GROUP BY d.id `, prefixExpr, sqlConcat(s.db, sqlConcat(s.db, "?", "d.name"), "'/'"), @@ -1286,7 +1286,7 @@ GROUP BY d.id bucket, // b.name = ? path + "%", // o.object_id LIKE ? utf8.RuneCountInString(path), path, // SUBSTR(o.object_id, 1, ?) = ? - dirID, // d.parent_id = ? + dirID, // d.db_parent_id = ? } } else { objectsQueryParams = []interface{}{ @@ -1296,7 +1296,7 @@ GROUP BY d.id bucket, // b.name = ? path + "%", // o.object_id LIKE ? utf8.RuneCountInString(path), path, // SUBSTR(o.object_id, 1, ?) = ? - dirID, // d.parent_id = ? + dirID, // d.db_parent_id = ? } } @@ -1783,7 +1783,7 @@ func (s *SQLStore) dirID(tx *gorm.DB, dirPath string) (uint, error) { splitPath := strings.Split(dirPath[1:len(dirPath)-1], "/") for _, dir := range splitPath { - if err := tx.Raw("SELECT id FROM directories WHERE name = ? AND parent_id = ?", dir, dirID). + if err := tx.Raw("SELECT id FROM directories WHERE name = ? AND db_parent_id = ?", dir, dirID). Scan(&dirID).Error; err != nil { return 0, fmt.Errorf("failed to fetch root directory: %w", err) } @@ -1811,8 +1811,8 @@ func makeDirsForPath(tx *gorm.DB, path string) (uint, error) { DoNothing: true, }). 
Create(&dbDirectory{ - Name: dir, - ParentID: dirID, + Name: dir, + DBParentID: dirID, }).Error; err != nil { return 0, fmt.Errorf("failed to create directory %v: %w", dir, err) } @@ -2887,7 +2887,7 @@ DELETE FROM directories WHERE directories.id != 1 AND NOT EXISTS (SELECT 1 FROM objects WHERE objects.db_directory_id = directories.id) -AND NOT EXISTS (SELECT 1 FROM (SELECT 1 FROM directories AS d WHERE d.parent_id = directories.id) i) +AND NOT EXISTS (SELECT 1 FROM (SELECT 1 FROM directories AS d WHERE d.db_parent_id = directories.id) i) `) if res.Error != nil { return res.Error diff --git a/stores/metadata_test.go b/stores/metadata_test.go index 372ba90e9..a1d37cf3c 100644 --- a/stores/metadata_test.go +++ b/stores/metadata_test.go @@ -2518,8 +2518,8 @@ func TestRenameObjects(t *testing.T) { for i, dir := range directories { if dir.ID != expectedDirs[i].id { t.Fatalf("unexpected directory id, %v != %v", dir.ID, expectedDirs[i].id) - } else if dir.ParentID != expectedDirs[i].parentID { - t.Fatalf("unexpected directory parent id, %v != %v", dir.ParentID, expectedDirs[i].parentID) + } else if dir.DBParentID != expectedDirs[i].parentID { + t.Fatalf("unexpected directory parent id, %v != %v", dir.DBParentID, expectedDirs[i].parentID) } else if dir.Name != expectedDirs[i].name { t.Fatalf("unexpected directory name, %v != %v", dir.Name, expectedDirs[i].name) } diff --git a/stores/migrations/mysql/main/migration_00008_directories.sql b/stores/migrations/mysql/main/migration_00008_directories.sql deleted file mode 100644 index 0f91e6360..000000000 --- a/stores/migrations/mysql/main/migration_00008_directories.sql +++ /dev/null @@ -1,16 +0,0 @@ --- dbDirectory -CREATE TABLE `directories` ( - `id` bigint unsigned NOT NULL AUTO_INCREMENT, - `created_at` datetime(3) DEFAULT NULL, - `parent_id` bigint unsigned, - `name` varchar(766) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL, - PRIMARY KEY (`id`), - KEY `idx_directories_parent_id` (`parent_id`), - UNIQUE KEY 
`idx_directories_name` (`name`), - CONSTRAINT `fk_directories_db_directories` FOREIGN KEY (`parent_id`) REFERENCES `directories` (`id`) ON DELETE CASCADE -) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; - -ALTER TABLE `objects` -ADD COLUMN `db_directory_id` bigint unsigned NOT NULL; -ADD CONSTRAINT `fk_objects_db_directory_id` FOREIGN KEY (`db_directory_id`) REFERENCES `directories` (`id`); -CREATE INDEX `idx_objects_db_directory_id` ON `objects` (`db_directory_id`); \ No newline at end of file diff --git a/stores/migrations/mysql/main/migration_00008_directories_1.sql b/stores/migrations/mysql/main/migration_00008_directories_1.sql index acf80d35b..0cd166ca5 100644 --- a/stores/migrations/mysql/main/migration_00008_directories_1.sql +++ b/stores/migrations/mysql/main/migration_00008_directories_1.sql @@ -2,12 +2,11 @@ CREATE TABLE IF NOT EXISTS `directories` ( `id` bigint unsigned NOT NULL AUTO_INCREMENT, `created_at` datetime(3) DEFAULT NULL, - `parent_id` bigint unsigned, + `db_parent_id` bigint unsigned, `name` varchar(766) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL, PRIMARY KEY (`id`), - KEY `idx_directories_parent_id` (`parent_id`), - UNIQUE KEY `idx_directories_name_parent_id` (`name`, `parent_id`), - CONSTRAINT `fk_directories_db_directories` FOREIGN KEY (`parent_id`) REFERENCES `directories` (`id`) ON DELETE CASCADE + UNIQUE KEY `idx_directories_parent_id_name` (`db_parent_id`, `name`), + CONSTRAINT `fk_directories_db_directories` FOREIGN KEY (`db_parent_id`) REFERENCES `directories` (`id`) ON DELETE CASCADE ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; -- dbObject: add column and constraint diff --git a/stores/migrations/mysql/main/migration_00008_directories_2.sql b/stores/migrations/mysql/main/migration_00008_directories_2.sql index 2ca36a429..6033f7c02 100644 --- a/stores/migrations/mysql/main/migration_00008_directories_2.sql +++ b/stores/migrations/mysql/main/migration_00008_directories_2.sql @@ 
-1,6 +1,3 @@ -- Add NOT NULL to column ALTER TABLE `objects` -MODIFY COLUMN `db_directory_id` bigint unsigned NOT NULL; - --- Create index -CREATE INDEX `idx_objects_db_directory_id` ON `objects` (`db_directory_id`); \ No newline at end of file +MODIFY COLUMN `db_directory_id` bigint unsigned NOT NULL; \ No newline at end of file diff --git a/stores/migrations/mysql/main/schema.sql b/stores/migrations/mysql/main/schema.sql index 4897f454d..103ef2663 100644 --- a/stores/migrations/mysql/main/schema.sql +++ b/stores/migrations/mysql/main/schema.sql @@ -317,11 +317,11 @@ CREATE TABLE `multipart_parts` ( CREATE TABLE `directories` ( `id` bigint unsigned NOT NULL AUTO_INCREMENT, `created_at` datetime(3) DEFAULT NULL, - `parent_id` bigint unsigned, + `db_parent_id` bigint unsigned, `name` varchar(766) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL, PRIMARY KEY (`id`), - UNIQUE KEY `idx_directories_parent_id_name` (`parent_id`, `name`), - CONSTRAINT `fk_directories_db_directories` FOREIGN KEY (`parent_id`) REFERENCES `directories` (`id`) ON DELETE CASCADE + UNIQUE KEY `idx_directories_parent_id_name` (`db_parent_id`, `name`), + CONSTRAINT `fk_directories_db_directories` FOREIGN KEY (`db_parent_id`) REFERENCES `directories` (`id`) ON DELETE CASCADE ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; -- dbObject diff --git a/stores/migrations/sqlite/main/schema.sql b/stores/migrations/sqlite/main/schema.sql index 8e66d9975..48fb6fd6b 100644 --- a/stores/migrations/sqlite/main/schema.sql +++ b/stores/migrations/sqlite/main/schema.sql @@ -45,9 +45,9 @@ CREATE TABLE `buckets` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` date CREATE INDEX `idx_buckets_name` ON `buckets`(`name`); -- dbDirectory -CREATE TABLE `directories` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`parent_id` integer,`name` text, CONSTRAINT `fk_directories_db_directories` FOREIGN KEY (`parent_id`) REFERENCES `directories`(`id`)); -CREATE INDEX 
`idx_directories_parent_id` ON `directories`(`parent_id`); -CREATE UNIQUE INDEX `idx_directories_parent_id_name` ON `directories`(`parent_id`, `name`); +CREATE TABLE `directories` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`db_parent_id` integer,`name` text, CONSTRAINT `fk_directories_db_directories` FOREIGN KEY (`db_parent_id`) REFERENCES `directories`(`id`)); +CREATE INDEX `idx_directories_parent_id` ON `directories`(`db_parent_id`); +CREATE UNIQUE INDEX `idx_directories_parent_id_name` ON `directories`(`db_parent_id`, `name`); -- dbObject CREATE TABLE `objects` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`db_bucket_id` integer NOT NULL, `db_directory_id` integer, `object_id` text,`key` blob,`health` real NOT NULL DEFAULT 1,`size` integer,`mime_type` text,`etag` text,CONSTRAINT `fk_objects_db_bucket` FOREIGN KEY (`db_bucket_id`) REFERENCES `buckets`(`id`),CONSTRAINT `fk_objects_db_directories` FOREIGN KEY (`db_directory_id`) REFERENCES `directories`(`id`)); From a28ebc3fb14d993680aeb8d888dcd55be7794fdb Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 7 May 2024 14:08:14 +0200 Subject: [PATCH 42/52] stores: sqlite migration --- .../sqlite/main/migration_00008_directories_1.sql | 12 ++++++++++++ .../sqlite/main/migration_00008_directories_2.sql | 1 + stores/migrations/sqlite/main/schema.sql | 2 +- 3 files changed, 14 insertions(+), 1 deletion(-) create mode 100644 stores/migrations/sqlite/main/migration_00008_directories_1.sql create mode 100644 stores/migrations/sqlite/main/migration_00008_directories_2.sql diff --git a/stores/migrations/sqlite/main/migration_00008_directories_1.sql b/stores/migrations/sqlite/main/migration_00008_directories_1.sql new file mode 100644 index 000000000..83b2ecca0 --- /dev/null +++ b/stores/migrations/sqlite/main/migration_00008_directories_1.sql @@ -0,0 +1,12 @@ +PRAGMA defer_foreign_keys=ON; + +-- dbDirectory +DROP TABLE IF EXISTS `directories`; +CREATE TABLE `directories` (`id` integer 
PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`db_parent_id` integer,`name` text, CONSTRAINT `fk_directories_db_directories` FOREIGN KEY (`db_parent_id`) REFERENCES `directories`(`id`)); +CREATE INDEX `idx_directories_parent_id` ON `directories`(`db_parent_id`); +CREATE UNIQUE INDEX `idx_directories_parent_id_name` ON `directories`(`db_parent_id`, `name`); + +-- dbObject: add column and constraint +ALTER TABLE `objects` +ADD COLUMN `db_directory_id` integer DEFAULT 0 NOT NULL, +ADD CONSTRAINT `fk_objects_db_directory_id` FOREIGN KEY (`db_directory_id`) REFERENCES `directories` (`id`); \ No newline at end of file diff --git a/stores/migrations/sqlite/main/migration_00008_directories_2.sql b/stores/migrations/sqlite/main/migration_00008_directories_2.sql new file mode 100644 index 000000000..ce7dc625f --- /dev/null +++ b/stores/migrations/sqlite/main/migration_00008_directories_2.sql @@ -0,0 +1 @@ +-- nothing to do \ No newline at end of file diff --git a/stores/migrations/sqlite/main/schema.sql b/stores/migrations/sqlite/main/schema.sql index 48fb6fd6b..3a751ba9e 100644 --- a/stores/migrations/sqlite/main/schema.sql +++ b/stores/migrations/sqlite/main/schema.sql @@ -50,7 +50,7 @@ CREATE INDEX `idx_directories_parent_id` ON `directories`(`db_parent_id`); CREATE UNIQUE INDEX `idx_directories_parent_id_name` ON `directories`(`db_parent_id`, `name`); -- dbObject -CREATE TABLE `objects` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`db_bucket_id` integer NOT NULL, `db_directory_id` integer, `object_id` text,`key` blob,`health` real NOT NULL DEFAULT 1,`size` integer,`mime_type` text,`etag` text,CONSTRAINT `fk_objects_db_bucket` FOREIGN KEY (`db_bucket_id`) REFERENCES `buckets`(`id`),CONSTRAINT `fk_objects_db_directories` FOREIGN KEY (`db_directory_id`) REFERENCES `directories`(`id`)); +CREATE TABLE `objects` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`db_bucket_id` integer NOT NULL, `db_directory_id` integer NOT NULL, `object_id` 
text,`key` blob,`health` real NOT NULL DEFAULT 1,`size` integer,`mime_type` text,`etag` text,CONSTRAINT `fk_objects_db_bucket` FOREIGN KEY (`db_bucket_id`) REFERENCES `buckets`(`id`),CONSTRAINT `fk_objects_db_directories` FOREIGN KEY (`db_directory_id`) REFERENCES `directories`(`id`)); CREATE INDEX `idx_objects_db_bucket_id` ON `objects`(`db_bucket_id`); CREATE INDEX `idx_objects_etag` ON `objects`(`etag`); CREATE INDEX `idx_objects_health` ON `objects`(`health`); From 430c863026f7a5b480d6708071404c6e43289f9e Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 7 May 2024 15:14:06 +0200 Subject: [PATCH 43/52] stores: rootDirID constant --- stores/metadata.go | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/stores/metadata.go b/stores/metadata.go index 79f891105..8cd335f4b 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -22,9 +22,7 @@ import ( "lukechampine.com/frand" ) -var ( - pruneSlabsAlertID = frand.Entropy256() -) +const rootDirID = 1 const ( // batchDurationThreshold is the upper bound for the duration of a batch @@ -45,13 +43,6 @@ const ( refreshHealthMaxHealthValidity = 72 * time.Hour ) -var ( - errInvalidNumberOfShards = errors.New("slab has invalid number of shards") - errShardRootChanged = errors.New("shard root changed") - - objectDeleteBatchSizes = []int64{10, 50, 100, 200, 500, 1000, 5000, 10000, 50000, 100000} -) - const ( contractStateInvalid contractState = iota contractStatePending @@ -60,6 +51,17 @@ const ( contractStateFailed ) +var ( + pruneSlabsAlertID = frand.Entropy256() +) + +var ( + errInvalidNumberOfShards = errors.New("slab has invalid number of shards") + errShardRootChanged = errors.New("shard root changed") + + objectDeleteBatchSizes = []int64{10, 50, 100, 200, 500, 1000, 5000, 10000, 50000, 100000} +) + type ( contractState uint8 @@ -1793,7 +1795,7 @@ func (s *SQLStore) dirID(tx *gorm.DB, dirPath string) (uint, error) { func makeDirsForPath(tx *gorm.DB, path string) (uint, 
error) { // Create root dir. - dirID := uint(1) + dirID := uint(rootDirID) if err := tx.Model(&dbDirectory{}). Clauses(clause.OnConflict{ DoNothing: true, From 7fce58afc6047946d38af6f7bcf696357debb51a Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Tue, 7 May 2024 18:16:28 +0200 Subject: [PATCH 44/52] stores: delay foreign keys --- stores/migrations.go | 16 +++++++++++++--- .../main/migration_00008_directories_1.sql | 2 -- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/stores/migrations.go b/stores/migrations.go index b6e91cf97..57ce75158 100644 --- a/stores/migrations.go +++ b/stores/migrations.go @@ -130,9 +130,19 @@ func performMigrations(db *gorm.DB, logger *zap.SugaredLogger) error { } // Create migrator. - opts := gormigrate.DefaultOptions - opts.UseTransaction = true - m := gormigrate.New(db, opts, migrations) + var m *gormigrate.Gormigrate + err := db.Transaction(func(tx *gorm.DB) error { + if isSQLite(tx) { + if err := tx.Exec("PRAGMA defer_foreign_keys = ON").Error; err != nil { + return fmt.Errorf("failed to defer foreign keys: %v", err) + } + } + m = gormigrate.New(tx, gormigrate.DefaultOptions, migrations) + return nil + }) + if err != nil { + return fmt.Errorf("failed to create migrator: %v", err) + } // Set init function. 
m.InitSchema(initSchema(dbIdentifier, logger)) diff --git a/stores/migrations/sqlite/main/migration_00008_directories_1.sql b/stores/migrations/sqlite/main/migration_00008_directories_1.sql index 55649bdf5..84dfb78a3 100644 --- a/stores/migrations/sqlite/main/migration_00008_directories_1.sql +++ b/stores/migrations/sqlite/main/migration_00008_directories_1.sql @@ -1,5 +1,3 @@ -PRAGMA defer_foreign_keys=ON; - -- dbDirectory DROP TABLE IF EXISTS `directories`; CREATE TABLE `directories` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`db_parent_id` integer,`name` text, CONSTRAINT `fk_directories_db_directories` FOREIGN KEY (`db_parent_id`) REFERENCES `directories`(`id`)); From a32b630baa39ccb9130c6f472815693f21a353a3 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 8 May 2024 09:13:00 +0200 Subject: [PATCH 45/52] Revert "stores: only store dir name" This reverts commit 7ba50647a496a2e0c0af9c5dffa29c3dd09df530. --- stores/metadata.go | 44 ++++++++++++++---------- stores/migrations/mysql/main/schema.sql | 6 ++++ stores/migrations/sqlite/main/schema.sql | 6 ++++ 3 files changed, 37 insertions(+), 19 deletions(-) diff --git a/stores/metadata.go b/stores/metadata.go index 8cd335f4b..4c39e521a 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -1259,19 +1259,16 @@ SELECT o.etag as ETag, o.created_at as ModTime, o.object_id as ObjectName, o.siz FROM objects o WHERE o.object_id != ? AND o.db_directory_id = ? AND o.db_bucket_id = (SELECT id FROM buckets b WHERE b.name = ?) 
AND %s UNION ALL -SELECT '' as ETag, MAX(o.created_at) as ModTime, %s as ObjectName, SUM(o.size) as Size, MIN(o.health) as Health, '' as MimeType +SELECT '' as ETag, MAX(o.created_at) as ModTime, d.name as ObjectName, SUM(o.size) as Size, MIN(o.health) as Health, '' as MimeType FROM objects o -INNER JOIN directories d ON SUBSTR(o.object_id, 1, %s(%s)) = %s AND %s +INNER JOIN directories d ON SUBSTR(o.object_id, 1, %s(d.name)) = d.name AND %s WHERE o.db_bucket_id = (SELECT id FROM buckets b WHERE b.name = ?) AND o.object_id LIKE ? AND SUBSTR(o.object_id, 1, ?) = ? AND d.db_parent_id = ? GROUP BY d.id `, prefixExpr, - sqlConcat(s.db, sqlConcat(s.db, "?", "d.name"), "'/'"), lengthFn, - sqlConcat(s.db, sqlConcat(s.db, "?", "d.name"), "'/'"), - sqlConcat(s.db, sqlConcat(s.db, "?", "d.name"), "'/'"), prefixExpr) // build query params @@ -1294,7 +1291,6 @@ GROUP BY d.id objectsQueryParams = []interface{}{ path, // o.object_id != ? dirID, bucket, // o.db_directory_id = ? AND b.name = ? - path, path, path, bucket, // b.name = ? path + "%", // o.object_id LIKE ? utf8.RuneCountInString(path), path, // SUBSTR(o.object_id, 1, ?) = ? @@ -1778,19 +1774,19 @@ func (s *SQLStore) dirID(tx *gorm.DB, dirPath string) (uint, error) { } else if !strings.HasSuffix(dirPath, "/") { return 0, fmt.Errorf("path must end with /") } - dirID := uint(1) + if dirPath == "/" { - return dirID, nil // root dir returned + return 1, nil // root dir returned } - splitPath := strings.Split(dirPath[1:len(dirPath)-1], "/") - for _, dir := range splitPath { - if err := tx.Raw("SELECT id FROM directories WHERE name = ? AND db_parent_id = ?", dir, dirID). - Scan(&dirID).Error; err != nil { - return 0, fmt.Errorf("failed to fetch root directory: %w", err) - } + var dir dbDirectory + if err := tx.Where("name", dirPath). + Select("id"). + Take(&dir). 
+ Error; err != nil { + return 0, fmt.Errorf("failed to fetch directory: %w", err) } - return dirID, nil + return dir.ID, nil } func makeDirsForPath(tx *gorm.DB, path string) (uint, error) { @@ -1800,15 +1796,25 @@ func makeDirsForPath(tx *gorm.DB, path string) (uint, error) { Clauses(clause.OnConflict{ DoNothing: true, }).Create(map[string]any{ - "id": dirID, + "id": dirID, + "name": "/", }).Error; err != nil { return 0, fmt.Errorf("failed to create root directory: %w", err) } // Create remaining directories. - path = strings.TrimPrefix(path, "/") - splitPath := strings.Split(path, "/") - for _, dir := range splitPath[:len(splitPath)-1] { + path = strings.TrimSuffix(path, "/") + if path == "/" { + return dirID, nil + } + for i := 0; i < utf8.RuneCountInString(path); i++ { + if path[i] != '/' { + continue + } + dir := path[:i+1] + if dir == "/" { + continue + } if err := tx.Clauses(clause.OnConflict{ DoNothing: true, }). diff --git a/stores/migrations/mysql/main/schema.sql b/stores/migrations/mysql/main/schema.sql index 103ef2663..d8144bb56 100644 --- a/stores/migrations/mysql/main/schema.sql +++ b/stores/migrations/mysql/main/schema.sql @@ -320,8 +320,14 @@ CREATE TABLE `directories` ( `db_parent_id` bigint unsigned, `name` varchar(766) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL, PRIMARY KEY (`id`), +<<<<<<< HEAD UNIQUE KEY `idx_directories_parent_id_name` (`db_parent_id`, `name`), CONSTRAINT `fk_directories_db_directories` FOREIGN KEY (`db_parent_id`) REFERENCES `directories` (`id`) ON DELETE CASCADE +======= + KEY `idx_directories_parent_id` (`parent_id`), + UNIQUE KEY `idx_directories_name` (`name`), + CONSTRAINT `fk_directories_db_directories` FOREIGN KEY (`parent_id`) REFERENCES `directories` (`id`) ON DELETE CASCADE +>>>>>>> parent of 7ba50647 (stores: only store dir name) ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; -- dbObject diff --git a/stores/migrations/sqlite/main/schema.sql b/stores/migrations/sqlite/main/schema.sql 
index 3a751ba9e..a37fea83c 100644 --- a/stores/migrations/sqlite/main/schema.sql +++ b/stores/migrations/sqlite/main/schema.sql @@ -45,9 +45,15 @@ CREATE TABLE `buckets` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` date CREATE INDEX `idx_buckets_name` ON `buckets`(`name`); -- dbDirectory +<<<<<<< HEAD CREATE TABLE `directories` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`db_parent_id` integer,`name` text, CONSTRAINT `fk_directories_db_directories` FOREIGN KEY (`db_parent_id`) REFERENCES `directories`(`id`)); CREATE INDEX `idx_directories_parent_id` ON `directories`(`db_parent_id`); CREATE UNIQUE INDEX `idx_directories_parent_id_name` ON `directories`(`db_parent_id`, `name`); +======= +CREATE TABLE `directories` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`parent_id` integer,`name` text, CONSTRAINT `fk_directories_db_directories` FOREIGN KEY (`parent_id`) REFERENCES `directories`(`id`)); +CREATE INDEX `idx_directories_parent_id` ON `directories`(`parent_id`); +CREATE UNIQUE INDEX `idx_directories_name` ON `directories`(`name`); +>>>>>>> parent of 7ba50647 (stores: only store dir name) -- dbObject CREATE TABLE `objects` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`db_bucket_id` integer NOT NULL, `db_directory_id` integer NOT NULL, `object_id` text,`key` blob,`health` real NOT NULL DEFAULT 1,`size` integer,`mime_type` text,`etag` text,CONSTRAINT `fk_objects_db_bucket` FOREIGN KEY (`db_bucket_id`) REFERENCES `buckets`(`id`),CONSTRAINT `fk_objects_db_directories` FOREIGN KEY (`db_directory_id`) REFERENCES `directories`(`id`)); From d2f5ac18c45d746c9212fce78c8d4ab217e86ef4 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 8 May 2024 09:13:11 +0200 Subject: [PATCH 46/52] Revert "stores: fix TestDirectories and TestRenameObjects" This reverts commit c8c65c59194b6847c487e8a2133d441d7019bfe2. 
--- stores/metadata_test.go | 19 +++++++------------ 1 file changed, 7 insertions(+), 12 deletions(-) diff --git a/stores/metadata_test.go b/stores/metadata_test.go index 5edf2706c..39bc7910d 100644 --- a/stores/metadata_test.go +++ b/stores/metadata_test.go @@ -2497,12 +2497,12 @@ func TestRenameObjects(t *testing.T) { { id: 1, parentID: 0, - name: "", + name: "/", }, { id: 2, parentID: 1, - name: "fileś", + name: "/fileś/", }, } var directories []dbDirectory @@ -4855,35 +4855,30 @@ func TestDirectories(t *testing.T) { parentID uint }{ { - name: "", + name: "/", id: 1, parentID: 0, }, { - name: "bar", + name: "/bar/", id: 2, parentID: 1, }, { - name: "", + name: "//", id: 3, parentID: 1, }, { - name: "", + name: "///", id: 4, parentID: 3, }, { - name: "dir", + name: "/dir/", id: 2, parentID: 1, }, - { - name: "fakedir", - id: 5, - parentID: 2, - }, } var dbDirs []dbDirectory From 7098ff06e26f90b011baa57eef6510818ac6afb6 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 8 May 2024 09:31:32 +0200 Subject: [PATCH 47/52] stores: fix tests --- internal/test/e2e/cluster_test.go | 2 +- stores/metadata.go | 5 +--- stores/migrations.go | 27 ++++++++----------- .../main/migration_00008_directories_1.sql | 5 ++-- stores/migrations/mysql/main/schema.sql | 9 ++----- .../main/migration_00008_directories_1.sql | 2 +- stores/migrations/sqlite/main/schema.sql | 6 ----- 7 files changed, 19 insertions(+), 37 deletions(-) diff --git a/internal/test/e2e/cluster_test.go b/internal/test/e2e/cluster_test.go index 16a335265..c7a84a7b5 100644 --- a/internal/test/e2e/cluster_test.go +++ b/internal/test/e2e/cluster_test.go @@ -251,7 +251,7 @@ func TestObjectEntries(t *testing.T) { entries[i].ModTime = api.TimeRFC3339{} // assert mime type - isDir := strings.HasSuffix(entries[i].Name, "/") + isDir := strings.HasSuffix(entries[i].Name, "/") && entries[i].Name != "//double/" // double is a file if (isDir && entries[i].MimeType != "") || (!isDir && entries[i].MimeType == "") { 
t.Fatal("unexpected mime type", entries[i].MimeType) } diff --git a/stores/metadata.go b/stores/metadata.go index 4c39e521a..14d4d2975 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -1278,9 +1278,6 @@ GROUP BY d.id path, // o.object_id != ? dirID, bucket, // o.db_directory_id = ? AND b.name = ? utf8.RuneCountInString(path + prefix), path + prefix, - path, - path, - path, utf8.RuneCountInString(path + prefix), path + prefix, bucket, // b.name = ? path + "%", // o.object_id LIKE ? @@ -1291,7 +1288,7 @@ GROUP BY d.id objectsQueryParams = []interface{}{ path, // o.object_id != ? dirID, bucket, // o.db_directory_id = ? AND b.name = ? - bucket, // b.name = ? + bucket, path + "%", // o.object_id LIKE ? utf8.RuneCountInString(path), path, // SUBSTR(o.object_id, 1, ?) = ? dirID, // d.db_parent_id = ? diff --git a/stores/migrations.go b/stores/migrations.go index 57ce75158..d9cffab82 100644 --- a/stores/migrations.go +++ b/stores/migrations.go @@ -129,27 +129,22 @@ func performMigrations(db *gorm.DB, logger *zap.SugaredLogger) error { }, } - // Create migrator. - var m *gormigrate.Gormigrate - err := db.Transaction(func(tx *gorm.DB) error { + // Run migration + return db.Transaction(func(tx *gorm.DB) error { if isSQLite(tx) { if err := tx.Exec("PRAGMA defer_foreign_keys = ON").Error; err != nil { return fmt.Errorf("failed to defer foreign keys: %v", err) } } - m = gormigrate.New(tx, gormigrate.DefaultOptions, migrations) - return nil - }) - if err != nil { - return fmt.Errorf("failed to create migrator: %v", err) - } + m := gormigrate.New(tx, gormigrate.DefaultOptions, migrations) - // Set init function. - m.InitSchema(initSchema(dbIdentifier, logger)) + // Set init function. + m.InitSchema(initSchema(dbIdentifier, logger)) - // Perform migrations. - if err := m.Migrate(); err != nil { - return fmt.Errorf("failed to migrate: %v", err) - } - return nil + // Perform migrations. 
+ if err := m.Migrate(); err != nil { + return fmt.Errorf("failed to migrate: %v", err) + } + return nil + }) } diff --git a/stores/migrations/mysql/main/migration_00008_directories_1.sql b/stores/migrations/mysql/main/migration_00008_directories_1.sql index 0cd166ca5..94b44ac18 100644 --- a/stores/migrations/mysql/main/migration_00008_directories_1.sql +++ b/stores/migrations/mysql/main/migration_00008_directories_1.sql @@ -1,11 +1,12 @@ -- dbDirectory -CREATE TABLE IF NOT EXISTS `directories` ( +CREATE TABLE `directories` ( `id` bigint unsigned NOT NULL AUTO_INCREMENT, `created_at` datetime(3) DEFAULT NULL, `db_parent_id` bigint unsigned, `name` varchar(766) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL, PRIMARY KEY (`id`), - UNIQUE KEY `idx_directories_parent_id_name` (`db_parent_id`, `name`), + KEY `idx_directories_parent_id` (`db_parent_id`), + UNIQUE KEY `idx_directories_name` (`name`), CONSTRAINT `fk_directories_db_directories` FOREIGN KEY (`db_parent_id`) REFERENCES `directories` (`id`) ON DELETE CASCADE ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; diff --git a/stores/migrations/mysql/main/schema.sql b/stores/migrations/mysql/main/schema.sql index d8144bb56..975658e11 100644 --- a/stores/migrations/mysql/main/schema.sql +++ b/stores/migrations/mysql/main/schema.sql @@ -320,14 +320,9 @@ CREATE TABLE `directories` ( `db_parent_id` bigint unsigned, `name` varchar(766) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin DEFAULT NULL, PRIMARY KEY (`id`), -<<<<<<< HEAD - UNIQUE KEY `idx_directories_parent_id_name` (`db_parent_id`, `name`), - CONSTRAINT `fk_directories_db_directories` FOREIGN KEY (`db_parent_id`) REFERENCES `directories` (`id`) ON DELETE CASCADE -======= - KEY `idx_directories_parent_id` (`parent_id`), + KEY `idx_directories_parent_id` (`db_parent_id`), UNIQUE KEY `idx_directories_name` (`name`), - CONSTRAINT `fk_directories_db_directories` FOREIGN KEY (`parent_id`) REFERENCES `directories` (`id`) ON DELETE CASCADE ->>>>>>> 
parent of 7ba50647 (stores: only store dir name) + CONSTRAINT `fk_directories_db_directories` FOREIGN KEY (`db_parent_id`) REFERENCES `directories` (`id`) ON DELETE CASCADE ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; -- dbObject diff --git a/stores/migrations/sqlite/main/migration_00008_directories_1.sql b/stores/migrations/sqlite/main/migration_00008_directories_1.sql index 84dfb78a3..a283f39ad 100644 --- a/stores/migrations/sqlite/main/migration_00008_directories_1.sql +++ b/stores/migrations/sqlite/main/migration_00008_directories_1.sql @@ -2,7 +2,7 @@ DROP TABLE IF EXISTS `directories`; CREATE TABLE `directories` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`db_parent_id` integer,`name` text, CONSTRAINT `fk_directories_db_directories` FOREIGN KEY (`db_parent_id`) REFERENCES `directories`(`id`)); CREATE INDEX `idx_directories_parent_id` ON `directories`(`db_parent_id`); -CREATE UNIQUE INDEX `idx_directories_parent_id_name` ON `directories`(`db_parent_id`, `name`); +CREATE UNIQUE INDEX `idx_directories_name` ON `directories`(`name`); -- dbObject: add column and constraint DROP TABLE IF EXISTS `objects_temp`; diff --git a/stores/migrations/sqlite/main/schema.sql b/stores/migrations/sqlite/main/schema.sql index a37fea83c..aa03a862d 100644 --- a/stores/migrations/sqlite/main/schema.sql +++ b/stores/migrations/sqlite/main/schema.sql @@ -45,15 +45,9 @@ CREATE TABLE `buckets` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` date CREATE INDEX `idx_buckets_name` ON `buckets`(`name`); -- dbDirectory -<<<<<<< HEAD CREATE TABLE `directories` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`db_parent_id` integer,`name` text, CONSTRAINT `fk_directories_db_directories` FOREIGN KEY (`db_parent_id`) REFERENCES `directories`(`id`)); CREATE INDEX `idx_directories_parent_id` ON `directories`(`db_parent_id`); -CREATE UNIQUE INDEX `idx_directories_parent_id_name` ON `directories`(`db_parent_id`, `name`); -======= -CREATE 
TABLE `directories` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`parent_id` integer,`name` text, CONSTRAINT `fk_directories_db_directories` FOREIGN KEY (`parent_id`) REFERENCES `directories`(`id`)); -CREATE INDEX `idx_directories_parent_id` ON `directories`(`parent_id`); CREATE UNIQUE INDEX `idx_directories_name` ON `directories`(`name`); ->>>>>>> parent of 7ba50647 (stores: only store dir name) -- dbObject CREATE TABLE `objects` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`db_bucket_id` integer NOT NULL, `db_directory_id` integer NOT NULL, `object_id` text,`key` blob,`health` real NOT NULL DEFAULT 1,`size` integer,`mime_type` text,`etag` text,CONSTRAINT `fk_objects_db_bucket` FOREIGN KEY (`db_bucket_id`) REFERENCES `buckets`(`id`),CONSTRAINT `fk_objects_db_directories` FOREIGN KEY (`db_directory_id`) REFERENCES `directories`(`id`)); From 591aeb3ee728a98ee01ee857294e6e92e9a35480 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 15 May 2024 10:56:56 +0200 Subject: [PATCH 48/52] mysql/sqlite: move common code to sql/common.go --- internal/sql/migrations.go | 188 ++++++++++++++++++ stores/sql/mysql/common.go | 38 +--- stores/sql/mysql/main.go | 132 +----------- stores/sql/mysql/metrics.go | 14 +- stores/sql/sqlite/common.go | 38 +--- stores/sql/sqlite/main.go | 135 +------------ stores/sql/sqlite/metrics.go | 14 +- ....sql => migration_00008_directories_1.sql} | 0 .../main/migration_00008_directories_2.sql | 1 + 9 files changed, 204 insertions(+), 356 deletions(-) create mode 100644 internal/sql/migrations.go rename stores/sql/sqlite/migrations/main/{migration_00008_directories.sql => migration_00008_directories_1.sql} (100%) create mode 100644 stores/sql/sqlite/migrations/main/migration_00008_directories_2.sql diff --git a/internal/sql/migrations.go b/internal/sql/migrations.go new file mode 100644 index 000000000..957f82bbd --- /dev/null +++ b/internal/sql/migrations.go @@ -0,0 +1,188 @@ +package sql + +import ( + "embed" 
+ "fmt" + "strings" + "unicode/utf8" + + "go.sia.tech/renterd/internal/utils" + "go.uber.org/zap" +) + +type Migration struct { + ID string + Migrate func(tx Tx) error +} + +var ( + MainMigrations = func(migrationsFs embed.FS, log *zap.SugaredLogger) []Migration { + dbIdentifier := "main" + return []Migration{ + { + ID: "00001_init", + Migrate: func(tx Tx) error { return ErrRunV072 }, + }, + { + ID: "00001_object_metadata", + Migrate: func(tx Tx) error { + return performMigration(tx, migrationsFs, dbIdentifier, "00001_object_metadata", log) + }, + }, + { + ID: "00002_prune_slabs_trigger", + Migrate: func(tx Tx) error { + err := performMigration(tx, migrationsFs, dbIdentifier, "00002_prune_slabs_trigger", log) + if utils.IsErr(err, ErrMySQLNoSuperPrivilege) { + log.Warn("migration 00002_prune_slabs_trigger requires the user to have the SUPER privilege to register triggers") + } + return err + }, + }, + { + ID: "00003_idx_objects_size", + Migrate: func(tx Tx) error { + return performMigration(tx, migrationsFs, dbIdentifier, "00003_idx_objects_size", log) + }, + }, + { + ID: "00004_prune_slabs_cascade", + Migrate: func(tx Tx) error { + return performMigration(tx, migrationsFs, dbIdentifier, "00004_prune_slabs_cascade", log) + }, + }, + { + ID: "00005_zero_size_object_health", + Migrate: func(tx Tx) error { + return performMigration(tx, migrationsFs, dbIdentifier, "00005_zero_size_object_health", log) + }, + }, + { + ID: "00006_idx_objects_created_at", + Migrate: func(tx Tx) error { + return performMigration(tx, migrationsFs, dbIdentifier, "00006_idx_objects_created_at", log) + }, + }, + { + ID: "00007_host_checks", + Migrate: func(tx Tx) error { + return performMigration(tx, migrationsFs, dbIdentifier, "00007_host_checks", log) + }, + }, + { + ID: "00008_directories", + Migrate: func(tx Tx) error { + if err := performMigration(tx, migrationsFs, dbIdentifier, "00008_directories_1", log); err != nil { + return fmt.Errorf("failed to migrate: %v", err) + } + // helper 
type + type obj struct { + ID uint + ObjectID string + } + // loop over all objects and deduplicate dirs to create + log.Info("beginning post-migration directory creation, this might take a while") + batchSize := 10000 + processedDirs := make(map[string]struct{}) + for offset := 0; ; offset += batchSize { + if offset > 0 && offset%batchSize == 0 { + log.Infof("processed %v objects", offset) + } + var objBatch []obj + rows, err := tx.Query("SELECT id, object_id FROM objects ORDER BY id LIMIT ? OFFSET ?", batchSize, offset) + if err != nil { + return fmt.Errorf("failed to fetch objects: %v", err) + } + for rows.Next() { + var o obj + if err := rows.Scan(&o.ID, &o.ObjectID); err != nil { + return fmt.Errorf("failed to scan object: %v", err) + } + objBatch = append(objBatch, o) + } + if len(objBatch) == 0 { + break // done + } + for _, obj := range objBatch { + // check if dir was processed + dir := "" // root + if i := strings.LastIndex(obj.ObjectID, "/"); i > -1 { + dir = obj.ObjectID[:i+1] + } + _, exists := processedDirs[dir] + if exists { + continue // already processed + } + processedDirs[dir] = struct{}{} + + // process + dirID, err := MakeDirsForPath(tx, obj.ObjectID) + if err != nil { + return fmt.Errorf("failed to create directory %s: %w", obj.ObjectID, err) + } + + if _, err := tx.Exec(` + UPDATE objects + SET db_directory_id = ? + WHERE object_id LIKE ? AND + SUBSTR(object_id, 1, ?) = ? 
AND + INSTR(SUBSTR(object_id, ?), '/') = 0 + `, + dirID, + dir+"%", + utf8.RuneCountInString(dir), dir, + utf8.RuneCountInString(dir)+1); err != nil { + return fmt.Errorf("failed to update object %s: %w", obj.ObjectID, err) + } + } + } + log.Info("post-migration directory creation complete") + if err := performMigration(tx, migrationsFs, dbIdentifier, "00008_directories_2", log); err != nil { + return fmt.Errorf("failed to migrate: %v", err) + } + return nil + }, + }, + } + } + MetricsMigrations = func(migrationsFs embed.FS, log *zap.SugaredLogger) []Migration { + dbIdentifier := "metrics" + return []Migration{ + { + ID: "00001_init", + Migrate: func(tx Tx) error { return ErrRunV072 }, + }, + { + ID: "00001_idx_contracts_fcid_timestamp", + Migrate: func(tx Tx) error { + return performMigration(tx, migrationsFs, dbIdentifier, "00001_idx_contracts_fcid_timestamp", log) + }, + }, + } + } +) + +func performMigration(tx Tx, fs embed.FS, kind, migration string, logger *zap.SugaredLogger) error { + logger.Infof("performing %s migration '%s'", kind, migration) + if err := ExecSQLFile(tx, fs, kind, fmt.Sprintf("migration_%s", migration)); err != nil { + return err + } + logger.Info("migration '%s' complete", migration) + return nil +} + +func ExecSQLFile(tx Tx, fs embed.FS, folder, filename string) error { + path := fmt.Sprintf("migrations/%s/%s.sql", folder, filename) + + // read file + file, err := fs.ReadFile(path) + if err != nil { + return fmt.Errorf("failed to read %s: %w", path, err) + } + + // execute it + if _, err := tx.Exec(string(file)); err != nil { + return fmt.Errorf("failed to execute %s: %w", path, err) + } + return nil +} diff --git a/stores/sql/mysql/common.go b/stores/sql/mysql/common.go index b4d670dfa..a6d9bdcd7 100644 --- a/stores/sql/mysql/common.go +++ b/stores/sql/mysql/common.go @@ -11,14 +11,9 @@ import ( ) //go:embed all:migrations/* -var migrations embed.FS +var migrationsFs embed.FS -type migration struct { - ID string - Migrate func(tx 
sql.Tx) error -} - -func performMigrations(db *sql.DB, identifier string, migrations []migration, l *zap.SugaredLogger) error { +func performMigrations(db *sql.DB, identifier string, migrations []sql.Migration, l *zap.SugaredLogger) error { // check if the migrations table exists var dummy string if err := db.QueryRow("SHOW TABLES LIKE 'migrations'").Scan(&dummy); err != nil && !errors.Is(err, dsql.ErrNoRows) { @@ -66,25 +61,9 @@ func performMigrations(db *sql.DB, identifier string, migrations []migration, l return nil } -func execSQLFile(tx sql.Tx, folder, filename string) error { - path := fmt.Sprintf("migrations/%s/%s.sql", folder, filename) - - // read file - file, err := migrations.ReadFile(path) - if err != nil { - return fmt.Errorf("failed to read %s: %w", path, err) - } - - // execute it - if _, err := tx.Exec(string(file)); err != nil { - return fmt.Errorf("failed to execute %s: %w", path, err) - } - return nil -} - // initSchema is executed only on a clean database. Otherwise the individual // migrations are executed. 
-func initSchema(db *sql.DB, identifier string, migrations []migration, logger *zap.SugaredLogger) error { +func initSchema(db *sql.DB, identifier string, migrations []sql.Migration, logger *zap.SugaredLogger) error { return db.Transaction(func(tx sql.Tx) error { logger.Infof("initializing '%s' schema", identifier) @@ -107,7 +86,7 @@ func initSchema(db *sql.DB, identifier string, migrations []migration, logger *z } } // create remaining schema - if err := execSQLFile(tx, identifier, "schema"); err != nil { + if err := sql.ExecSQLFile(tx, migrationsFs, identifier, "schema"); err != nil { return fmt.Errorf("failed to execute schema: %w", err) } @@ -116,15 +95,6 @@ func initSchema(db *sql.DB, identifier string, migrations []migration, logger *z }) } -func performMigration(tx sql.Tx, kind, migration string, logger *zap.SugaredLogger) error { - logger.Infof("performing %s migration '%s'", kind, migration) - if err := execSQLFile(tx, kind, fmt.Sprintf("migration_%s", migration)); err != nil { - return err - } - logger.Info("migration '%s' complete", migration) - return nil -} - func version(db *sql.DB) (string, string, error) { var version string if err := db.QueryRow("select version()").Scan(&version); err != nil { diff --git a/stores/sql/mysql/main.go b/stores/sql/mysql/main.go index 675b58a17..a67dd7fd0 100644 --- a/stores/sql/mysql/main.go +++ b/stores/sql/mysql/main.go @@ -3,13 +3,9 @@ package mysql import ( "context" dsql "database/sql" - "fmt" - "strings" "time" - "unicode/utf8" "go.sia.tech/renterd/internal/sql" - "go.sia.tech/renterd/internal/utils" "go.uber.org/zap" ) @@ -33,133 +29,7 @@ func (b *MainDatabase) Close() error { } func (b *MainDatabase) Migrate() error { - dbIdentifier := "main" - return performMigrations(b.db, dbIdentifier, []migration{ - { - ID: "00001_init", - Migrate: func(tx sql.Tx) error { return sql.ErrRunV072 }, - }, - { - ID: "00001_object_metadata", - Migrate: func(tx sql.Tx) error { - return performMigration(tx, dbIdentifier, 
"00001_object_metadata", b.log) - }, - }, - { - ID: "00002_prune_slabs_trigger", - Migrate: func(tx sql.Tx) error { - err := performMigration(tx, dbIdentifier, "00002_prune_slabs_trigger", b.log) - if utils.IsErr(err, sql.ErrMySQLNoSuperPrivilege) { - b.log.Warn("migration 00002_prune_slabs_trigger requires the user to have the SUPER privilege to register triggers") - } - return err - }, - }, - { - ID: "00003_idx_objects_size", - Migrate: func(tx sql.Tx) error { - return performMigration(tx, dbIdentifier, "00003_idx_objects_size", b.log) - }, - }, - { - ID: "00004_prune_slabs_cascade", - Migrate: func(tx sql.Tx) error { - return performMigration(tx, dbIdentifier, "00004_prune_slabs_cascade", b.log) - }, - }, - { - ID: "00005_zero_size_object_health", - Migrate: func(tx sql.Tx) error { - return performMigration(tx, dbIdentifier, "00005_zero_size_object_health", b.log) - }, - }, - { - ID: "00006_idx_objects_created_at", - Migrate: func(tx sql.Tx) error { - return performMigration(tx, dbIdentifier, "00006_idx_objects_created_at", b.log) - }, - }, - { - ID: "00007_host_checks", - Migrate: func(tx sql.Tx) error { - return performMigration(tx, dbIdentifier, "00007_host_checks", b.log) - }, - }, // Remove the extra closing parenthesis here - { - ID: "00008_directories", - Migrate: func(tx sql.Tx) error { - if err := performMigration(tx, dbIdentifier, "00008_directories_1", b.log); err != nil { - return fmt.Errorf("failed to migrate: %v", err) - } - // helper type - type obj struct { - ID uint - ObjectID string - } - // loop over all objects and deduplicate dirs to create - b.log.Info("beginning post-migration directory creation, this might take a while") - batchSize := 10000 - processedDirs := make(map[string]struct{}) - for offset := 0; ; offset += batchSize { - if offset > 0 && offset%batchSize == 0 { - b.log.Infof("processed %v objects", offset) - } - var objBatch []obj - rows, err := tx.Query("SELECT id, object_id FROM objects ORDER BY id LIMIT ? 
OFFSET ?", batchSize, offset) - if err != nil { - return fmt.Errorf("failed to fetch objects: %v", err) - } - for rows.Next() { - var o obj - if err := rows.Scan(&o.ID, &o.ObjectID); err != nil { - return fmt.Errorf("failed to scan object: %v", err) - } - objBatch = append(objBatch, o) - } - if len(objBatch) == 0 { - break // done - } - for _, obj := range objBatch { - // check if dir was processed - dir := "" // root - if i := strings.LastIndex(obj.ObjectID, "/"); i > -1 { - dir = obj.ObjectID[:i+1] - } - _, exists := processedDirs[dir] - if exists { - continue // already processed - } - processedDirs[dir] = struct{}{} - - // process - dirID, err := sql.MakeDirsForPath(tx, obj.ObjectID) - if err != nil { - return fmt.Errorf("failed to create directory %s: %w", obj.ObjectID, err) - } - - if _, err := tx.Exec(` - UPDATE objects - SET db_directory_id = ? - WHERE object_id LIKE ? AND - SUBSTR(object_id, 1, ?) = ? AND - INSTR(SUBSTR(object_id, ?), '/') = 0 - `, - dirID, - dir+"%", - utf8.RuneCountInString(dir), dir, - utf8.RuneCountInString(dir)+1); err != nil { - return fmt.Errorf("failed to update object %s: %w", obj.ObjectID, err) - } - } - } - b.log.Info("post-migration directory creation complete") - if err := performMigration(tx, dbIdentifier, "00008_directories_2", b.log); err != nil { - return fmt.Errorf("failed to migrate: %v", err) - } - return nil - }, - }, - }, b.log) + return performMigrations(b.db, "main", sql.MainMigrations(migrationsFs, b.log), b.log) } func (b *MainDatabase) Version(_ context.Context) (string, string, error) { diff --git a/stores/sql/mysql/metrics.go b/stores/sql/mysql/metrics.go index 5ef528fb6..2708ee6c6 100644 --- a/stores/sql/mysql/metrics.go +++ b/stores/sql/mysql/metrics.go @@ -30,19 +30,7 @@ func (b *MetricsDatabase) Close() error { } func (b *MetricsDatabase) Migrate() error { - dbIdentifier := "metrics" - return performMigrations(b.db, dbIdentifier, []migration{ - { - ID: "00001_init", - Migrate: func(tx sql.Tx) error { return 
sql.ErrRunV072 }, - }, - { - ID: "00001_idx_contracts_fcid_timestamp", - Migrate: func(tx sql.Tx) error { - return performMigration(tx, dbIdentifier, "00001_idx_contracts_fcid_timestamp", b.log) - }, - }, - }, b.log) + return performMigrations(b.db, "metrics", sql.MetricsMigrations(migrationsFs, b.log), b.log) } func (b *MetricsDatabase) Version(_ context.Context) (string, string, error) { diff --git a/stores/sql/sqlite/common.go b/stores/sql/sqlite/common.go index 31ce53e72..9836bfa11 100644 --- a/stores/sql/sqlite/common.go +++ b/stores/sql/sqlite/common.go @@ -11,14 +11,9 @@ import ( ) //go:embed all:migrations/* -var migrations embed.FS +var migrationsFs embed.FS -type migration struct { - ID string - Migrate func(tx sql.Tx) error -} - -func performMigrations(db *sql.DB, identifier string, migrations []migration, l *zap.SugaredLogger) error { +func performMigrations(db *sql.DB, identifier string, migrations []sql.Migration, l *zap.SugaredLogger) error { // check if the migrations table exists var hasTable bool if err := db.QueryRow("SELECT 1 FROM sqlite_master WHERE type='table' AND name='migrations'").Scan(&hasTable); err != nil && !errors.Is(err, dsql.ErrNoRows) { @@ -72,25 +67,9 @@ func performMigrations(db *sql.DB, identifier string, migrations []migration, l return nil } -func execSQLFile(tx sql.Tx, folder, filename string) error { - path := fmt.Sprintf("migrations/%s/%s.sql", folder, filename) - - // read file - file, err := migrations.ReadFile(path) - if err != nil { - return fmt.Errorf("failed to read %s: %w", path, err) - } - - // execute it - if _, err := tx.Exec(string(file)); err != nil { - return fmt.Errorf("failed to execute %s: %w", path, err) - } - return nil -} - // initSchema is executed only on a clean database. Otherwise the individual // migrations are executed. 
-func initSchema(db *sql.DB, identifier string, migrations []migration, logger *zap.SugaredLogger) error { +func initSchema(db *sql.DB, identifier string, migrations []sql.Migration, logger *zap.SugaredLogger) error { return db.Transaction(func(tx sql.Tx) error { logger.Infof("initializing '%s' schema", identifier) @@ -109,7 +88,7 @@ func initSchema(db *sql.DB, identifier string, migrations []migration, logger *z } } // create remaining schema - if err := execSQLFile(tx, identifier, "schema"); err != nil { + if err := sql.ExecSQLFile(tx, migrationsFs, identifier, "schema"); err != nil { return fmt.Errorf("failed to execute schema: %w", err) } @@ -118,15 +97,6 @@ func initSchema(db *sql.DB, identifier string, migrations []migration, logger *z }) } -func performMigration(tx sql.Tx, kind, migration string, logger *zap.SugaredLogger) error { - logger.Infof("performing %s migration '%s'", kind, migration) - if err := execSQLFile(tx, kind, fmt.Sprintf("migration_%s", migration)); err != nil { - return err - } - logger.Info("migration '%s' complete", migration) - return nil -} - func version(db *sql.DB) (string, string, error) { var version string if err := db.QueryRow("select sqlite_version()").Scan(&version); err != nil { diff --git a/stores/sql/sqlite/main.go b/stores/sql/sqlite/main.go index 94a7719d5..8ae71ab02 100644 --- a/stores/sql/sqlite/main.go +++ b/stores/sql/sqlite/main.go @@ -3,13 +3,9 @@ package sqlite import ( "context" dsql "database/sql" - "fmt" - "strings" "time" - "unicode/utf8" "go.sia.tech/renterd/internal/sql" - "go.sia.tech/renterd/internal/utils" "go.uber.org/zap" ) @@ -32,133 +28,10 @@ func (b *MainDatabase) Close() error { return b.db.Close() } -func (b *MainDatabase) Version(_ context.Context) (string, string, error) { - return version(b.db) -} - func (b *MainDatabase) Migrate() error { - dbIdentifier := "main" - return performMigrations(b.db, dbIdentifier, []migration{ - { - ID: "00001_init", - Migrate: func(tx sql.Tx) error { return 
sql.ErrRunV072 }, - }, - { - ID: "00001_object_metadata", - Migrate: func(tx sql.Tx) error { - return performMigration(tx, dbIdentifier, "00001_object_metadata", b.log) - }, - }, - { - ID: "00002_prune_slabs_trigger", - Migrate: func(tx sql.Tx) error { - err := performMigration(tx, dbIdentifier, "00002_prune_slabs_trigger", b.log) - if utils.IsErr(err, sql.ErrMySQLNoSuperPrivilege) { - b.log.Warn("migration 00002_prune_slabs_trigger requires the user to have the SUPER privilege to register triggers") - } - return err - }, - }, - { - ID: "00003_idx_objects_size", - Migrate: func(tx sql.Tx) error { - return performMigration(tx, dbIdentifier, "00003_idx_objects_size", b.log) - }, - }, - { - ID: "00004_prune_slabs_cascade", - Migrate: func(tx sql.Tx) error { - return performMigration(tx, dbIdentifier, "00004_prune_slabs_cascade", b.log) - }, - }, - { - ID: "00005_zero_size_object_health", - Migrate: func(tx sql.Tx) error { - return performMigration(tx, dbIdentifier, "00005_zero_size_object_health", b.log) - }, - }, - { - ID: "00006_idx_objects_created_at", - Migrate: func(tx sql.Tx) error { - return performMigration(tx, dbIdentifier, "00006_idx_objects_created_at", b.log) - }, - }, - { - ID: "00007_host_checks", - Migrate: func(tx sql.Tx) error { - return performMigration(tx, dbIdentifier, "00007_host_checks", b.log) - }, - }, - { - ID: "00008_directories", - Migrate: func(tx sql.Tx) error { - if err := performMigration(tx, dbIdentifier, "00008_directories", b.log); err != nil { - return fmt.Errorf("failed to migrate: %v", err) - } - // helper type - type obj struct { - ID uint - ObjectID string - } - // loop over all objects and deduplicate dirs to create - b.log.Info("beginning post-migration directory creation, this might take a while") - batchSize := 10000 - processedDirs := make(map[string]struct{}) - for offset := 0; ; offset += batchSize { - if offset > 0 && offset%batchSize == 0 { - b.log.Infof("processed %v objects", offset) - } - var objBatch []obj - rows, 
err := tx.Query("SELECT id, object_id FROM objects ORDER BY id LIMIT ? OFFSET ?", batchSize, offset) - if err != nil { - return fmt.Errorf("failed to fetch objects: %v", err) - } - for rows.Next() { - var o obj - if err := rows.Scan(&o.ID, &o.ObjectID); err != nil { - return fmt.Errorf("failed to scan object: %v", err) - } - objBatch = append(objBatch, o) - } - if len(objBatch) == 0 { - break // done - } - for _, obj := range objBatch { - // check if dir was processed - dir := "" // root - if i := strings.LastIndex(obj.ObjectID, "/"); i > -1 { - dir = obj.ObjectID[:i+1] - } - _, exists := processedDirs[dir] - if exists { - continue // already processed - } - processedDirs[dir] = struct{}{} - - // process - dirID, err := sql.MakeDirsForPath(tx, obj.ObjectID) - if err != nil { - return fmt.Errorf("failed to create directory %s: %w", obj.ObjectID, err) - } + return performMigrations(b.db, "main", sql.MainMigrations(migrationsFs, b.log), b.log) +} - if _, err := tx.Exec(` - UPDATE objects - SET db_directory_id = ? - WHERE object_id LIKE ? AND - SUBSTR(object_id, 1, ?) = ? 
AND - INSTR(SUBSTR(object_id, ?), '/') = 0 - `, - dirID, - dir+"%", - utf8.RuneCountInString(dir), dir, - utf8.RuneCountInString(dir)+1); err != nil { - return fmt.Errorf("failed to update object %s: %w", obj.ObjectID, err) - } - } - } - b.log.Info("post-migration directory creation complete") - return nil - }, - }, - }, b.log) +func (b *MainDatabase) Version(_ context.Context) (string, string, error) { + return version(b.db) } diff --git a/stores/sql/sqlite/metrics.go b/stores/sql/sqlite/metrics.go index 6e49a1618..026a9734b 100644 --- a/stores/sql/sqlite/metrics.go +++ b/stores/sql/sqlite/metrics.go @@ -33,17 +33,5 @@ func (b *MetricsDatabase) Version(_ context.Context) (string, string, error) { } func (b *MetricsDatabase) Migrate() error { - dbIdentifier := "metrics" - return performMigrations(b.db, dbIdentifier, []migration{ - { - ID: "00001_init", - Migrate: func(tx sql.Tx) error { return sql.ErrRunV072 }, - }, - { - ID: "00001_idx_contracts_fcid_timestamp", - Migrate: func(tx sql.Tx) error { - return performMigration(tx, dbIdentifier, "00001_idx_contracts_fcid_timestamp", b.log) - }, - }, - }, b.log) + return performMigrations(b.db, "metrics", sql.MetricsMigrations(migrationsFs, b.log), b.log) } diff --git a/stores/sql/sqlite/migrations/main/migration_00008_directories.sql b/stores/sql/sqlite/migrations/main/migration_00008_directories_1.sql similarity index 100% rename from stores/sql/sqlite/migrations/main/migration_00008_directories.sql rename to stores/sql/sqlite/migrations/main/migration_00008_directories_1.sql diff --git a/stores/sql/sqlite/migrations/main/migration_00008_directories_2.sql b/stores/sql/sqlite/migrations/main/migration_00008_directories_2.sql new file mode 100644 index 000000000..ce7dc625f --- /dev/null +++ b/stores/sql/sqlite/migrations/main/migration_00008_directories_2.sql @@ -0,0 +1 @@ +-- nothing to do \ No newline at end of file From 1426f8a98a762a8bf77f78a032d9d5f42bbfcc21 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 
15 May 2024 11:44:54 +0200 Subject: [PATCH 49/52] sql: get rid of INIT_SCHEMA --- internal/sql/migrations.go | 32 +++++++++++++----- internal/sql/sql.go | 2 -- stores/sql/mysql/common.go | 64 +++++------------------------------- stores/sql/mysql/main.go | 2 +- stores/sql/mysql/metrics.go | 2 +- stores/sql/sqlite/common.go | 56 +++---------------------------- stores/sql/sqlite/main.go | 2 +- stores/sql/sqlite/metrics.go | 2 +- 8 files changed, 42 insertions(+), 120 deletions(-) diff --git a/internal/sql/migrations.go b/internal/sql/migrations.go index 957f82bbd..e8ca26915 100644 --- a/internal/sql/migrations.go +++ b/internal/sql/migrations.go @@ -162,16 +162,23 @@ var ( } ) -func performMigration(tx Tx, fs embed.FS, kind, migration string, logger *zap.SugaredLogger) error { - logger.Infof("performing %s migration '%s'", kind, migration) - if err := ExecSQLFile(tx, fs, kind, fmt.Sprintf("migration_%s", migration)); err != nil { - return err - } - logger.Info("migration '%s' complete", migration) - return nil +func InitSchema(db *DB, fs embed.FS, identifier string, migrations []Migration) error { + return db.Transaction(func(tx Tx) error { + // init schema + if err := execSQLFile(tx, fs, identifier, "schema"); err != nil { + return fmt.Errorf("failed to execute schema: %w", err) + } + // insert migration ids + for _, migration := range migrations { + if _, err := tx.Exec("INSERT INTO migrations (id) VALUES (?)", migration.ID); err != nil { + return fmt.Errorf("failed to insert migration '%s': %w", migration.ID, err) + } + } + return nil + }) } -func ExecSQLFile(tx Tx, fs embed.FS, folder, filename string) error { +func execSQLFile(tx Tx, fs embed.FS, folder, filename string) error { path := fmt.Sprintf("migrations/%s/%s.sql", folder, filename) // read file @@ -186,3 +193,12 @@ func ExecSQLFile(tx Tx, fs embed.FS, folder, filename string) error { } return nil } + +func performMigration(tx Tx, fs embed.FS, kind, migration string, logger *zap.SugaredLogger) error { + 
logger.Infof("performing %s migration '%s'", kind, migration) + if err := execSQLFile(tx, fs, kind, fmt.Sprintf("migration_%s", migration)); err != nil { + return err + } + logger.Info("migration '%s' complete", migration) + return nil +} diff --git a/internal/sql/sql.go b/internal/sql/sql.go index b2cf36b87..d9794a50d 100644 --- a/internal/sql/sql.go +++ b/internal/sql/sql.go @@ -19,8 +19,6 @@ const ( factor = 1.8 // factor ^ retryAttempts = backoff time in milliseconds maxBackoff = 15 * time.Second - SCHEMA_INIT = "SCHEMA_INIT" - DirectoriesRootID = 1 ) diff --git a/stores/sql/mysql/common.go b/stores/sql/mysql/common.go index a6d9bdcd7..2bf6c78ec 100644 --- a/stores/sql/mysql/common.go +++ b/stores/sql/mysql/common.go @@ -1,27 +1,23 @@ package mysql import ( - dsql "database/sql" "embed" - "errors" "fmt" "go.sia.tech/renterd/internal/sql" - "go.uber.org/zap" ) //go:embed all:migrations/* var migrationsFs embed.FS -func performMigrations(db *sql.DB, identifier string, migrations []sql.Migration, l *zap.SugaredLogger) error { - // check if the migrations table exists - var dummy string - if err := db.QueryRow("SHOW TABLES LIKE 'migrations'").Scan(&dummy); err != nil && !errors.Is(err, dsql.ErrNoRows) { - return fmt.Errorf("failed to check for migrations table: %w", err) - } - if dummy == "" { - // init schema if it doesn't - return initSchema(db, identifier, migrations, l) +func performMigrations(db *sql.DB, identifier string, migrations []sql.Migration) error { + // try to create migrations table + if _, err := db.Exec(` + CREATE TABLE migrations ( + id varchar(255) NOT NULL, + PRIMARY KEY (id) + ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci;`); err != nil { + return fmt.Errorf("failed to create migrations table: %w", err) } // check if the migrations table is empty @@ -30,15 +26,7 @@ func performMigrations(db *sql.DB, identifier string, migrations []sql.Migration return fmt.Errorf("failed to count rows in migrations table: %w", err) } else 
if isEmpty { // table is empty, init schema - return initSchema(db, identifier, migrations, l) - } - - // check if the schema was initialised already - var initialised bool - if err := db.QueryRow("SELECT EXISTS (SELECT 1 FROM migrations WHERE id = ?)", sql.SCHEMA_INIT).Scan(&initialised); err != nil { - return fmt.Errorf("failed to check if schema was initialised: %w", err) - } else if !initialised { - return fmt.Errorf("schema was not initialised but has a non-empty migration table") + return sql.InitSchema(db, migrationsFs, identifier, migrations) } // apply missing migrations @@ -61,40 +49,6 @@ func performMigrations(db *sql.DB, identifier string, migrations []sql.Migration return nil } -// initSchema is executed only on a clean database. Otherwise the individual -// migrations are executed. -func initSchema(db *sql.DB, identifier string, migrations []sql.Migration, logger *zap.SugaredLogger) error { - return db.Transaction(func(tx sql.Tx) error { - logger.Infof("initializing '%s' schema", identifier) - - // create migrations table if necessary - if _, err := tx.Exec(` - CREATE TABLE migrations ( - id varchar(255) NOT NULL, - PRIMARY KEY (id) - ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci;`); err != nil { - return fmt.Errorf("failed to create migrations table: %w", err) - } - // insert SCHEMA_INIT - if _, err := tx.Exec("INSERT INTO migrations (id) VALUES (?)", sql.SCHEMA_INIT); err != nil { - return fmt.Errorf("failed to insert SCHEMA_INIT: %w", err) - } - // insert migration ids - for _, migration := range migrations { - if _, err := tx.Exec("INSERT INTO migrations (id) VALUES (?)", migration.ID); err != nil { - return fmt.Errorf("failed to insert migration '%s': %w", migration.ID, err) - } - } - // create remaining schema - if err := sql.ExecSQLFile(tx, migrationsFs, identifier, "schema"); err != nil { - return fmt.Errorf("failed to execute schema: %w", err) - } - - logger.Infof("initialization complete") - return nil - }) -} - func 
version(db *sql.DB) (string, string, error) { var version string if err := db.QueryRow("select version()").Scan(&version); err != nil { diff --git a/stores/sql/mysql/main.go b/stores/sql/mysql/main.go index a67dd7fd0..439a39309 100644 --- a/stores/sql/mysql/main.go +++ b/stores/sql/mysql/main.go @@ -29,7 +29,7 @@ func (b *MainDatabase) Close() error { } func (b *MainDatabase) Migrate() error { - return performMigrations(b.db, "main", sql.MainMigrations(migrationsFs, b.log), b.log) + return performMigrations(b.db, "main", sql.MainMigrations(migrationsFs, b.log)) } func (b *MainDatabase) Version(_ context.Context) (string, string, error) { diff --git a/stores/sql/mysql/metrics.go b/stores/sql/mysql/metrics.go index 2708ee6c6..0eb64a943 100644 --- a/stores/sql/mysql/metrics.go +++ b/stores/sql/mysql/metrics.go @@ -30,7 +30,7 @@ func (b *MetricsDatabase) Close() error { } func (b *MetricsDatabase) Migrate() error { - return performMigrations(b.db, "metrics", sql.MetricsMigrations(migrationsFs, b.log), b.log) + return performMigrations(b.db, "metrics", sql.MetricsMigrations(migrationsFs, b.log)) } func (b *MetricsDatabase) Version(_ context.Context) (string, string, error) { diff --git a/stores/sql/sqlite/common.go b/stores/sql/sqlite/common.go index 9836bfa11..43c8974e0 100644 --- a/stores/sql/sqlite/common.go +++ b/stores/sql/sqlite/common.go @@ -1,27 +1,19 @@ package sqlite import ( - dsql "database/sql" "embed" - "errors" "fmt" "go.sia.tech/renterd/internal/sql" - "go.uber.org/zap" ) //go:embed all:migrations/* var migrationsFs embed.FS -func performMigrations(db *sql.DB, identifier string, migrations []sql.Migration, l *zap.SugaredLogger) error { - // check if the migrations table exists - var hasTable bool - if err := db.QueryRow("SELECT 1 FROM sqlite_master WHERE type='table' AND name='migrations'").Scan(&hasTable); err != nil && !errors.Is(err, dsql.ErrNoRows) { - return fmt.Errorf("failed to check for migrations table: %w", err) - } - if !hasTable { - // init 
schema if it doesn't - return initSchema(db, identifier, migrations, l) +func performMigrations(db *sql.DB, identifier string, migrations []sql.Migration) error { + // try to create migrations table + if _, err := db.Exec("CREATE TABLE IF NOT EXISTS `migrations` (`id` text,PRIMARY KEY (`id`))"); err != nil { + return fmt.Errorf("failed to create migrations table: %w", err) } // check if the migrations table is empty @@ -30,15 +22,7 @@ func performMigrations(db *sql.DB, identifier string, migrations []sql.Migration return fmt.Errorf("failed to count rows in migrations table: %w", err) } else if isEmpty { // table is empty, init schema - return initSchema(db, identifier, migrations, l) - } - - // check if the schema was initialised already - var initialised bool - if err := db.QueryRow("SELECT EXISTS (SELECT 1 FROM migrations WHERE id = ?)", sql.SCHEMA_INIT).Scan(&initialised); err != nil { - return fmt.Errorf("failed to check if schema was initialised: %w", err) - } else if !initialised { - return fmt.Errorf("schema was not initialised but has a non-empty migration table") + return sql.InitSchema(db, migrationsFs, identifier, migrations) } // apply missing migrations @@ -67,36 +51,6 @@ func performMigrations(db *sql.DB, identifier string, migrations []sql.Migration return nil } -// initSchema is executed only on a clean database. Otherwise the individual -// migrations are executed. 
-func initSchema(db *sql.DB, identifier string, migrations []sql.Migration, logger *zap.SugaredLogger) error { - return db.Transaction(func(tx sql.Tx) error { - logger.Infof("initializing '%s' schema", identifier) - - // create migrations table if necessary - if _, err := tx.Exec("CREATE TABLE IF NOT EXISTS `migrations` (`id` text,PRIMARY KEY (`id`))"); err != nil { - return fmt.Errorf("failed to create migrations table: %w", err) - } - // insert SCHEMA_INIT - if _, err := tx.Exec("INSERT INTO migrations (id) VALUES (?)", sql.SCHEMA_INIT); err != nil { - return fmt.Errorf("failed to insert SCHEMA_INIT: %w", err) - } - // insert migration ids - for _, migration := range migrations { - if _, err := tx.Exec("INSERT INTO migrations (id) VALUES (?)", migration.ID); err != nil { - return fmt.Errorf("failed to insert migration '%s': %w", migration.ID, err) - } - } - // create remaining schema - if err := sql.ExecSQLFile(tx, migrationsFs, identifier, "schema"); err != nil { - return fmt.Errorf("failed to execute schema: %w", err) - } - - logger.Infof("initialization complete") - return nil - }) -} - func version(db *sql.DB) (string, string, error) { var version string if err := db.QueryRow("select sqlite_version()").Scan(&version); err != nil { diff --git a/stores/sql/sqlite/main.go b/stores/sql/sqlite/main.go index 8ae71ab02..e5ac631ec 100644 --- a/stores/sql/sqlite/main.go +++ b/stores/sql/sqlite/main.go @@ -29,7 +29,7 @@ func (b *MainDatabase) Close() error { } func (b *MainDatabase) Migrate() error { - return performMigrations(b.db, "main", sql.MainMigrations(migrationsFs, b.log), b.log) + return performMigrations(b.db, "main", sql.MainMigrations(migrationsFs, b.log)) } func (b *MainDatabase) Version(_ context.Context) (string, string, error) { diff --git a/stores/sql/sqlite/metrics.go b/stores/sql/sqlite/metrics.go index 026a9734b..93dea240d 100644 --- a/stores/sql/sqlite/metrics.go +++ b/stores/sql/sqlite/metrics.go @@ -33,5 +33,5 @@ func (b *MetricsDatabase) 
Version(_ context.Context) (string, string, error) { } func (b *MetricsDatabase) Migrate() error { - return performMigrations(b.db, "metrics", sql.MetricsMigrations(migrationsFs, b.log), b.log) + return performMigrations(b.db, "metrics", sql.MetricsMigrations(migrationsFs, b.log)) } From 97894fbb885d4d5d527d1f974c1826ef319dc7a5 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 15 May 2024 13:56:53 +0200 Subject: [PATCH 50/52] sql: add Migrator interface --- internal/sql/common.go | 53 --------------------- internal/sql/migrations.go | 91 +++++++++++++++++++++++++++++------- stores/sql/mysql/common.go | 36 +++----------- stores/sql/mysql/main.go | 63 ++++++++++++++++++++++++- stores/sql/mysql/metrics.go | 14 +++++- stores/sql/sqlite/common.go | 47 +++++-------------- stores/sql/sqlite/main.go | 63 ++++++++++++++++++++++++- stores/sql/sqlite/metrics.go | 14 +++++- 8 files changed, 242 insertions(+), 139 deletions(-) delete mode 100644 internal/sql/common.go diff --git a/internal/sql/common.go b/internal/sql/common.go deleted file mode 100644 index 956d580e1..000000000 --- a/internal/sql/common.go +++ /dev/null @@ -1,53 +0,0 @@ -package sql - -import ( - "fmt" - "strings" - "unicode/utf8" -) - -func MakeDirsForPath(tx Tx, path string) (uint, error) { - insertDirStmt, err := tx.Prepare("INSERT INTO directories (name, db_parent_id) VALUES (?, ?) ON DUPLICATE KEY UPDATE id = id") - if err != nil { - return 0, fmt.Errorf("failed to prepare statement: %w", err) - } - defer insertDirStmt.Close() - - queryDirStmt, err := tx.Prepare("SELECT id FROM directories WHERE name = ?") - if err != nil { - return 0, fmt.Errorf("failed to prepare statement: %w", err) - } - defer queryDirStmt.Close() - - // Create root dir. - dirID := uint(DirectoriesRootID) - if _, err := insertDirStmt.Exec('/', dirID); err != nil { - return 0, fmt.Errorf("failed to create root directory: %w", err) - } - - // Create remaining directories. 
- path = strings.TrimSuffix(path, "/") - if path == "/" { - return dirID, nil - } - for i := 0; i < utf8.RuneCountInString(path); i++ { - if path[i] != '/' { - continue - } - dir := path[:i+1] - if dir == "/" { - continue - } - if _, err := insertDirStmt.Exec(dir, dirID); err != nil { - return 0, fmt.Errorf("failed to create directory %v: %w", dir, err) - } - var childID uint - if err := queryDirStmt.QueryRow(dir).Scan(&childID); err != nil { - return 0, fmt.Errorf("failed to fetch directory id %v: %w", dir, err) - } else if childID == 0 { - return 0, fmt.Errorf("dir we just created doesn't exist - shouldn't happen") - } - dirID = childID - } - return dirID, nil -} diff --git a/internal/sql/migrations.go b/internal/sql/migrations.go index e8ca26915..f33ce0e90 100644 --- a/internal/sql/migrations.go +++ b/internal/sql/migrations.go @@ -10,13 +10,28 @@ import ( "go.uber.org/zap" ) -type Migration struct { - ID string - Migrate func(tx Tx) error -} +type ( + Migration struct { + ID string + Migrate func(tx Tx) error + } + + // Migrator is an interface for defining db - specific helper methods + // required during migrations + Migrator interface { + ApplyMigration(func(tx Tx) error) error + CreateMigrationTable() error + DB() *DB + } + + MainMigrator interface { + Migrator + MakeDirsForPath(tx Tx, path string) (uint, error) + } +) var ( - MainMigrations = func(migrationsFs embed.FS, log *zap.SugaredLogger) []Migration { + MainMigrations = func(m MainMigrator, migrationsFs embed.FS, log *zap.SugaredLogger) []Migration { dbIdentifier := "main" return []Migration{ { @@ -116,7 +131,7 @@ var ( processedDirs[dir] = struct{}{} // process - dirID, err := MakeDirsForPath(tx, obj.ObjectID) + dirID, err := m.MakeDirsForPath(tx, obj.ObjectID) if err != nil { return fmt.Errorf("failed to create directory %s: %w", obj.ObjectID, err) } @@ -162,20 +177,46 @@ var ( } ) -func InitSchema(db *DB, fs embed.FS, identifier string, migrations []Migration) error { - return 
db.Transaction(func(tx Tx) error { - // init schema - if err := execSQLFile(tx, fs, identifier, "schema"); err != nil { - return fmt.Errorf("failed to execute schema: %w", err) - } - // insert migration ids - for _, migration := range migrations { +func PerformMigrations(m Migrator, fs embed.FS, identifier string, migrations []Migration) error { + // try to create migrations table + err := m.CreateMigrationTable() + if err != nil { + return fmt.Errorf("failed to create migrations table: %w", err) + } + + // check if the migrations table is empty + var isEmpty bool + if err := m.DB().QueryRow("SELECT COUNT(*) = 0 FROM migrations").Scan(&isEmpty); err != nil { + return fmt.Errorf("failed to count rows in migrations table: %w", err) + } else if isEmpty { + // table is empty, init schema + return initSchema(m.DB(), fs, identifier, migrations) + } + + // apply missing migrations + for _, migration := range migrations { + if err := m.ApplyMigration(func(tx Tx) error { + // check if migration was already applied + var applied bool + if err := tx.QueryRow("SELECT EXISTS (SELECT 1 FROM migrations WHERE id = ?)", migration.ID).Scan(&applied); err != nil { + return fmt.Errorf("failed to check if migration '%s' was already applied: %w", migration.ID, err) + } else if applied { + return nil + } + // run migration + if err := migration.Migrate(tx); err != nil { + return fmt.Errorf("migration '%s' failed: %w", migration.ID, err) + } + // insert migration if _, err := tx.Exec("INSERT INTO migrations (id) VALUES (?)", migration.ID); err != nil { return fmt.Errorf("failed to insert migration '%s': %w", migration.ID, err) } + return nil + }); err != nil { + return fmt.Errorf("migration '%s' failed: %w", migration.ID, err) } - return nil - }) + } + return nil } func execSQLFile(tx Tx, fs embed.FS, folder, filename string) error { @@ -194,11 +235,27 @@ func execSQLFile(tx Tx, fs embed.FS, folder, filename string) error { return nil } +func initSchema(db *DB, fs embed.FS, identifier 
string, migrations []Migration) error { + return db.Transaction(func(tx Tx) error { + // init schema + if err := execSQLFile(tx, fs, identifier, "schema"); err != nil { + return fmt.Errorf("failed to execute schema: %w", err) + } + // insert migration ids + for _, migration := range migrations { + if _, err := tx.Exec("INSERT INTO migrations (id) VALUES (?)", migration.ID); err != nil { + return fmt.Errorf("failed to insert migration '%s': %w", migration.ID, err) + } + } + return nil + }) +} + func performMigration(tx Tx, fs embed.FS, kind, migration string, logger *zap.SugaredLogger) error { logger.Infof("performing %s migration '%s'", kind, migration) if err := execSQLFile(tx, fs, kind, fmt.Sprintf("migration_%s", migration)); err != nil { return err } - logger.Info("migration '%s' complete", migration) + logger.Infof("migration '%s' complete", migration) return nil } diff --git a/stores/sql/mysql/common.go b/stores/sql/mysql/common.go index 2bf6c78ec..3182a8977 100644 --- a/stores/sql/mysql/common.go +++ b/stores/sql/mysql/common.go @@ -10,42 +10,18 @@ import ( //go:embed all:migrations/* var migrationsFs embed.FS -func performMigrations(db *sql.DB, identifier string, migrations []sql.Migration) error { - // try to create migrations table +func applyMigration(db *sql.DB, fn func(tx sql.Tx) error) error { + return db.Transaction(fn) +} + +func createMigrationTable(db *sql.DB) error { if _, err := db.Exec(` - CREATE TABLE migrations ( + CREATE TABLE IF NOT EXISTS migrations ( id varchar(255) NOT NULL, PRIMARY KEY (id) ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci;`); err != nil { return fmt.Errorf("failed to create migrations table: %w", err) } - - // check if the migrations table is empty - var isEmpty bool - if err := db.QueryRow("SELECT COUNT(*) = 0 FROM migrations").Scan(&isEmpty); err != nil { - return fmt.Errorf("failed to count rows in migrations table: %w", err) - } else if isEmpty { - // table is empty, init schema - return 
sql.InitSchema(db, migrationsFs, identifier, migrations) - } - - // apply missing migrations - for _, migration := range migrations { - if err := db.Transaction(func(tx sql.Tx) error { - // check if migration was already applied - var applied bool - if err := tx.QueryRow("SELECT EXISTS (SELECT 1 FROM migrations WHERE id = ?)", migration.ID).Scan(&applied); err != nil { - return fmt.Errorf("failed to check if migration '%s' was already applied: %w", migration.ID, err) - } else if applied { - return nil - } - - // run migration - return migration.Migrate(tx) - }); err != nil { - return fmt.Errorf("migration '%s' failed: %w", migration.ID, err) - } - } return nil } diff --git a/stores/sql/mysql/main.go b/stores/sql/mysql/main.go index 439a39309..649bf05e4 100644 --- a/stores/sql/mysql/main.go +++ b/stores/sql/mysql/main.go @@ -3,7 +3,10 @@ package mysql import ( "context" dsql "database/sql" + "fmt" + "strings" "time" + "unicode/utf8" "go.sia.tech/renterd/internal/sql" @@ -24,12 +27,70 @@ func NewMainDatabase(db *dsql.DB, log *zap.SugaredLogger, lqd, ltd time.Duration } } +func (b *MainDatabase) ApplyMigration(fn func(tx sql.Tx) error) error { + return applyMigration(b.db, fn) +} + func (b *MainDatabase) Close() error { return b.db.Close() } +func (b *MainDatabase) DB() *sql.DB { + return b.db +} + +func (b *MainDatabase) CreateMigrationTable() error { + return createMigrationTable(b.db) +} + +func (b *MainDatabase) MakeDirsForPath(tx sql.Tx, path string) (uint, error) { + insertDirStmt, err := tx.Prepare("INSERT INTO directories (name, db_parent_id) VALUES (?, ?) ON DUPLICATE KEY UPDATE id = id") + if err != nil { + return 0, fmt.Errorf("failed to prepare statement: %w", err) + } + defer insertDirStmt.Close() + + queryDirStmt, err := tx.Prepare("SELECT id FROM directories WHERE name = ?") + if err != nil { + return 0, fmt.Errorf("failed to prepare statement: %w", err) + } + defer queryDirStmt.Close() + + // Create root dir. 
+ dirID := uint(sql.DirectoriesRootID) + if _, err := insertDirStmt.Exec("/", dirID); err != nil { + return 0, fmt.Errorf("failed to create root directory: %w", err) + } + + // Create remaining directories. + path = strings.TrimSuffix(path, "/") + if path == "/" { + return dirID, nil + } + for i := 0; i < utf8.RuneCountInString(path); i++ { + if path[i] != '/' { + continue + } + dir := path[:i+1] + if dir == "/" { + continue + } + if _, err := insertDirStmt.Exec(dir, dirID); err != nil { + return 0, fmt.Errorf("failed to create directory %v: %w", dir, err) + } + var childID uint + if err := queryDirStmt.QueryRow(dir).Scan(&childID); err != nil { + return 0, fmt.Errorf("failed to fetch directory id %v: %w", dir, err) + } else if childID == 0 { + return 0, fmt.Errorf("dir we just created doesn't exist - shouldn't happen") + } + dirID = childID + } + return dirID, nil +} + func (b *MainDatabase) Migrate() error { - return performMigrations(b.db, "main", sql.MainMigrations(migrationsFs, b.log)) + return sql.PerformMigrations(b, migrationsFs, "main", sql.MainMigrations(b, migrationsFs, b.log)) } func (b *MainDatabase) Version(_ context.Context) (string, string, error) { diff --git a/stores/sql/mysql/metrics.go b/stores/sql/mysql/metrics.go index 0eb64a943..3bba48536 100644 --- a/stores/sql/mysql/metrics.go +++ b/stores/sql/mysql/metrics.go @@ -25,12 +25,24 @@ func NewMetricsDatabase(db *dsql.DB, log *zap.SugaredLogger, lqd, ltd time.Durat } } +func (b *MetricsDatabase) ApplyMigration(fn func(tx sql.Tx) error) error { + return applyMigration(b.db, fn) +} + func (b *MetricsDatabase) Close() error { return b.db.Close() } +func (b *MetricsDatabase) DB() *sql.DB { + return b.db +} + +func (b *MetricsDatabase) CreateMigrationTable() error { + return createMigrationTable(b.db) +} + func (b *MetricsDatabase) Migrate() error { - return performMigrations(b.db, "metrics", sql.MetricsMigrations(migrationsFs, b.log)) + return sql.PerformMigrations(b, migrationsFs, "metrics", 
sql.MetricsMigrations(migrationsFs, b.log)) } func (b *MetricsDatabase) Version(_ context.Context) (string, string, error) { diff --git a/stores/sql/sqlite/common.go b/stores/sql/sqlite/common.go index 43c8974e0..92a2b226f 100644 --- a/stores/sql/sqlite/common.go +++ b/stores/sql/sqlite/common.go @@ -10,44 +10,21 @@ import ( //go:embed all:migrations/* var migrationsFs embed.FS -func performMigrations(db *sql.DB, identifier string, migrations []sql.Migration) error { - // try to create migrations table +func applyMigration(db *sql.DB, fn func(tx sql.Tx) error) error { + return db.Transaction(func(tx sql.Tx) error { + // defer foreign_keys to avoid triggering unwanted CASCADEs or + // constraint failures + if _, err := tx.Exec("PRAGMA defer_foreign_keys = ON"); err != nil { + return fmt.Errorf("failed to defer foreign keys: %w", err) + } + return fn(tx) + }) +} + +func createMigrationTable(db *sql.DB) error { if _, err := db.Exec("CREATE TABLE IF NOT EXISTS `migrations` (`id` text,PRIMARY KEY (`id`))"); err != nil { return fmt.Errorf("failed to create migrations table: %w", err) } - - // check if the migrations table is empty - var isEmpty bool - if err := db.QueryRow("SELECT COUNT(*) = 0 FROM migrations").Scan(&isEmpty); err != nil { - return fmt.Errorf("failed to count rows in migrations table: %w", err) - } else if isEmpty { - // table is empty, init schema - return sql.InitSchema(db, migrationsFs, identifier, migrations) - } - - // apply missing migrations - for _, migration := range migrations { - if err := db.Transaction(func(tx sql.Tx) error { - // check if migration was already applied - var applied bool - if err := tx.QueryRow("SELECT EXISTS (SELECT 1 FROM migrations WHERE id = ?)", migration.ID).Scan(&applied); err != nil { - return fmt.Errorf("failed to check if migration '%s' was already applied: %w", migration.ID, err) - } else if applied { - return nil - } - - // defer foreign_keys to avoid triggering unwanted CASCADEs or - // constraint failures - if 
_, err := tx.Exec("PRAGMA defer_foreign_keys = ON"); err != nil { - return fmt.Errorf("failed to defer foreign keys: %w", err) - } - - // run migration - return migration.Migrate(tx) - }); err != nil { - return fmt.Errorf("migration '%s' failed: %w", migration.ID, err) - } - } return nil } diff --git a/stores/sql/sqlite/main.go b/stores/sql/sqlite/main.go index e5ac631ec..197b397db 100644 --- a/stores/sql/sqlite/main.go +++ b/stores/sql/sqlite/main.go @@ -3,7 +3,10 @@ package sqlite import ( "context" dsql "database/sql" + "fmt" + "strings" "time" + "unicode/utf8" "go.sia.tech/renterd/internal/sql" @@ -24,12 +27,70 @@ func NewMainDatabase(db *dsql.DB, log *zap.SugaredLogger, lqd, ltd time.Duration } } +func (b *MainDatabase) ApplyMigration(fn func(tx sql.Tx) error) error { + return applyMigration(b.db, fn) +} + func (b *MainDatabase) Close() error { return b.db.Close() } +func (b *MainDatabase) DB() *sql.DB { + return b.db +} + +func (b *MainDatabase) CreateMigrationTable() error { + return createMigrationTable(b.db) +} + +func (b *MainDatabase) MakeDirsForPath(tx sql.Tx, path string) (uint, error) { + insertDirStmt, err := tx.Prepare("INSERT INTO directories (name, db_parent_id) VALUES (?, ?) ON CONFLICT(name) DO NOTHING") + if err != nil { + return 0, fmt.Errorf("failed to prepare statement: %w", err) + } + defer insertDirStmt.Close() + + queryDirStmt, err := tx.Prepare("SELECT id FROM directories WHERE name = ?") + if err != nil { + return 0, fmt.Errorf("failed to prepare statement: %w", err) + } + defer queryDirStmt.Close() + + // Create root dir. + dirID := uint(sql.DirectoriesRootID) + if _, err := insertDirStmt.Exec("/", dirID); err != nil { + return 0, fmt.Errorf("failed to create root directory: %w", err) + } + + // Create remaining directories. 
+ path = strings.TrimSuffix(path, "/") + if path == "/" { + return dirID, nil + } + for i := 0; i < utf8.RuneCountInString(path); i++ { + if path[i] != '/' { + continue + } + dir := path[:i+1] + if dir == "/" { + continue + } + if _, err := insertDirStmt.Exec(dir, dirID); err != nil { + return 0, fmt.Errorf("failed to create directory %v: %w", dir, err) + } + var childID uint + if err := queryDirStmt.QueryRow(dir).Scan(&childID); err != nil { + return 0, fmt.Errorf("failed to fetch directory id %v: %w", dir, err) + } else if childID == 0 { + return 0, fmt.Errorf("dir we just created doesn't exist - shouldn't happen") + } + dirID = childID + } + return dirID, nil +} + func (b *MainDatabase) Migrate() error { - return performMigrations(b.db, "main", sql.MainMigrations(migrationsFs, b.log)) + return sql.PerformMigrations(b, migrationsFs, "main", sql.MainMigrations(b, migrationsFs, b.log)) } func (b *MainDatabase) Version(_ context.Context) (string, string, error) { diff --git a/stores/sql/sqlite/metrics.go b/stores/sql/sqlite/metrics.go index 93dea240d..5ea76681a 100644 --- a/stores/sql/sqlite/metrics.go +++ b/stores/sql/sqlite/metrics.go @@ -24,14 +24,26 @@ func NewMetricsDatabase(db *dsql.DB, log *zap.SugaredLogger, lqd, ltd time.Durat } } +func (b *MetricsDatabase) ApplyMigration(fn func(tx sql.Tx) error) error { + return applyMigration(b.db, fn) +} + func (b *MetricsDatabase) Close() error { return b.db.Close() } +func (b *MetricsDatabase) DB() *sql.DB { + return b.db +} + +func (b *MetricsDatabase) CreateMigrationTable() error { + return createMigrationTable(b.db) +} + func (b *MetricsDatabase) Version(_ context.Context) (string, string, error) { return version(b.db) } func (b *MetricsDatabase) Migrate() error { - return performMigrations(b.db, "metrics", sql.MetricsMigrations(migrationsFs, b.log)) + return sql.PerformMigrations(b, migrationsFs, "metrics", sql.MetricsMigrations(migrationsFs, b.log)) } From c2dec0ca52876d943732d648bc7307399b3b5ce6 Mon Sep 17 
00:00:00 2001 From: Chris Schinnerl Date: Wed, 15 May 2024 15:23:07 +0200 Subject: [PATCH 51/52] sql: skip integrity check if migration didn't run --- internal/sql/migrations.go | 14 +++++++------- stores/sql/mysql/common.go | 7 +++++-- stores/sql/mysql/main.go | 2 +- stores/sql/mysql/metrics.go | 2 +- stores/sql/sqlite/common.go | 26 ++++++++++++++++++++------ stores/sql/sqlite/main.go | 2 +- stores/sql/sqlite/metrics.go | 2 +- 7 files changed, 36 insertions(+), 19 deletions(-) diff --git a/internal/sql/migrations.go b/internal/sql/migrations.go index f33ce0e90..bc95ddd05 100644 --- a/internal/sql/migrations.go +++ b/internal/sql/migrations.go @@ -19,7 +19,7 @@ type ( // Migrator is an interface for defining db - specific helper methods // required during migrations Migrator interface { - ApplyMigration(func(tx Tx) error) error + ApplyMigration(func(tx Tx) (bool, error)) error CreateMigrationTable() error DB() *DB } @@ -195,23 +195,23 @@ func PerformMigrations(m Migrator, fs embed.FS, identifier string, migrations [] // apply missing migrations for _, migration := range migrations { - if err := m.ApplyMigration(func(tx Tx) error { + if err := m.ApplyMigration(func(tx Tx) (bool, error) { // check if migration was already applied var applied bool if err := tx.QueryRow("SELECT EXISTS (SELECT 1 FROM migrations WHERE id = ?)", migration.ID).Scan(&applied); err != nil { - return fmt.Errorf("failed to check if migration '%s' was already applied: %w", migration.ID, err) + return false, fmt.Errorf("failed to check if migration '%s' was already applied: %w", migration.ID, err) } else if applied { - return nil + return false, nil } // run migration if err := migration.Migrate(tx); err != nil { - return fmt.Errorf("migration '%s' failed: %w", migration.ID, err) + return false, fmt.Errorf("migration '%s' failed: %w", migration.ID, err) } // insert migration if _, err := tx.Exec("INSERT INTO migrations (id) VALUES (?)", migration.ID); err != nil { - return fmt.Errorf("failed to 
insert migration '%s': %w", migration.ID, err) + return false, fmt.Errorf("failed to insert migration '%s': %w", migration.ID, err) } - return nil + return true, nil }); err != nil { return fmt.Errorf("migration '%s' failed: %w", migration.ID, err) } diff --git a/stores/sql/mysql/common.go b/stores/sql/mysql/common.go index 3182a8977..79c2cefb7 100644 --- a/stores/sql/mysql/common.go +++ b/stores/sql/mysql/common.go @@ -10,8 +10,11 @@ import ( //go:embed all:migrations/* var migrationsFs embed.FS -func applyMigration(db *sql.DB, fn func(tx sql.Tx) error) error { - return db.Transaction(fn) +func applyMigration(db *sql.DB, fn func(tx sql.Tx) (bool, error)) error { + return db.Transaction(func(tx sql.Tx) error { + _, err := fn(tx) + return err + }) } func createMigrationTable(db *sql.DB) error { diff --git a/stores/sql/mysql/main.go b/stores/sql/mysql/main.go index 649bf05e4..cd4e388ac 100644 --- a/stores/sql/mysql/main.go +++ b/stores/sql/mysql/main.go @@ -27,7 +27,7 @@ func NewMainDatabase(db *dsql.DB, log *zap.SugaredLogger, lqd, ltd time.Duration } } -func (b *MainDatabase) ApplyMigration(fn func(tx sql.Tx) error) error { +func (b *MainDatabase) ApplyMigration(fn func(tx sql.Tx) (bool, error)) error { return applyMigration(b.db, fn) } diff --git a/stores/sql/mysql/metrics.go b/stores/sql/mysql/metrics.go index 3bba48536..7a4df1699 100644 --- a/stores/sql/mysql/metrics.go +++ b/stores/sql/mysql/metrics.go @@ -25,7 +25,7 @@ func NewMetricsDatabase(db *dsql.DB, log *zap.SugaredLogger, lqd, ltd time.Durat } } -func (b *MetricsDatabase) ApplyMigration(fn func(tx sql.Tx) error) error { +func (b *MetricsDatabase) ApplyMigration(fn func(tx sql.Tx) (bool, error)) error { return applyMigration(b.db, fn) } diff --git a/stores/sql/sqlite/common.go b/stores/sql/sqlite/common.go index 92a2b226f..c33fa948c 100644 --- a/stores/sql/sqlite/common.go +++ b/stores/sql/sqlite/common.go @@ -1,7 +1,9 @@ package sqlite import ( + dsql "database/sql" "embed" + "errors" "fmt" 
"go.sia.tech/renterd/internal/sql" @@ -10,14 +12,26 @@ import ( //go:embed all:migrations/* var migrationsFs embed.FS -func applyMigration(db *sql.DB, fn func(tx sql.Tx) error) error { +func applyMigration(db *sql.DB, fn func(tx sql.Tx) (bool, error)) (err error) { + if _, err := db.Exec("PRAGMA foreign_keys=OFF"); err != nil { + return fmt.Errorf("failed to disable foreign keys: %w", err) + } + defer func() { + _, err2 := db.Exec("PRAGMA foreign_keys=ON") + err = errors.Join(err, err2) + }() return db.Transaction(func(tx sql.Tx) error { - // defer foreign_keys to avoid triggering unwanted CASCADEs or - // constraint failures - if _, err := tx.Exec("PRAGMA defer_foreign_keys = ON"); err != nil { - return fmt.Errorf("failed to defer foreign keys: %w", err) + // execute migration + if migrated, err := fn(tx); err != nil { + return err + } else if !migrated { + return nil + } + // perform foreign key integrity check + if err := tx.QueryRow("PRAGMA foreign_key_check").Scan(); !errors.Is(err, dsql.ErrNoRows) { + return fmt.Errorf("foreign key constraints are not satisfied") } - return fn(tx) + return nil }) } diff --git a/stores/sql/sqlite/main.go b/stores/sql/sqlite/main.go index 197b397db..cd403f5f0 100644 --- a/stores/sql/sqlite/main.go +++ b/stores/sql/sqlite/main.go @@ -27,7 +27,7 @@ func NewMainDatabase(db *dsql.DB, log *zap.SugaredLogger, lqd, ltd time.Duration } } -func (b *MainDatabase) ApplyMigration(fn func(tx sql.Tx) error) error { +func (b *MainDatabase) ApplyMigration(fn func(tx sql.Tx) (bool, error)) error { return applyMigration(b.db, fn) } diff --git a/stores/sql/sqlite/metrics.go b/stores/sql/sqlite/metrics.go index 5ea76681a..8a8252e8f 100644 --- a/stores/sql/sqlite/metrics.go +++ b/stores/sql/sqlite/metrics.go @@ -24,7 +24,7 @@ func NewMetricsDatabase(db *dsql.DB, log *zap.SugaredLogger, lqd, ltd time.Durat } } -func (b *MetricsDatabase) ApplyMigration(fn func(tx sql.Tx) error) error { +func (b *MetricsDatabase) ApplyMigration(fn func(tx sql.Tx) 
(bool, error)) error { return applyMigration(b.db, fn) } From b0e3a32035d1290e1e28f6ab95ea4e48a5e0ae67 Mon Sep 17 00:00:00 2001 From: Chris Schinnerl Date: Wed, 15 May 2024 17:12:19 +0200 Subject: [PATCH 52/52] sql: address comments --- internal/sql/migrations.go | 2 +- stores/metadata.go | 7 +++---- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/internal/sql/migrations.go b/internal/sql/migrations.go index bc95ddd05..9d8295bcc 100644 --- a/internal/sql/migrations.go +++ b/internal/sql/migrations.go @@ -16,7 +16,7 @@ type ( Migrate func(tx Tx) error } - // Migrator is an interface for defining db - specific helper methods + // Migrator is an interface for defining database-specific helper methods // required during migrations Migrator interface { ApplyMigration(func(tx Tx) (bool, error)) error diff --git a/stores/metadata.go b/stores/metadata.go index 14d4d2975..ab042389b 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -14,6 +14,7 @@ import ( "go.sia.tech/core/types" "go.sia.tech/renterd/alerts" "go.sia.tech/renterd/api" + "go.sia.tech/renterd/internal/sql" "go.sia.tech/renterd/object" "go.sia.tech/siad/modules" "go.uber.org/zap" @@ -22,8 +23,6 @@ import ( "lukechampine.com/frand" ) -const rootDirID = 1 - const ( // batchDurationThreshold is the upper bound for the duration of a batch // operation on the database. As long as we are below the threshold, we @@ -1788,7 +1787,7 @@ func (s *SQLStore) dirID(tx *gorm.DB, dirPath string) (uint, error) { func makeDirsForPath(tx *gorm.DB, path string) (uint, error) { // Create root dir. - dirID := uint(rootDirID) + dirID := uint(sql.DirectoriesRootID) if err := tx.Model(&dbDirectory{}). 
Clauses(clause.OnConflict{ DoNothing: true, @@ -1867,7 +1866,7 @@ func (s *SQLStore) UpdateObject(ctx context.Context, bucket, path, contractSet, // create the dir dirID, err := makeDirsForPath(tx, path) if err != nil { - return fmt.Errorf("failed to create directories: %w", err) + return fmt.Errorf("failed to create directories for path '%s': %w", path, err) } // Insert a new object.