Skip to content

Commit 954fe69

Browse files
craig[bot] and dt
committed
Merge #45764
45764: cloud: require nodeID in nodelocal URIs r=dt a=dt Previously nodelocal URIs without a host specified, or with a nodeID of 0, would be interpreted as 'whichever node happens to be evaluating this URI'. This behavior often leads to confusion and typically a user is better served by specifying which node, e.g. with nodelocal://1/foo/bar. This change now *requires* users specify a nodeID in nodelocal URIs. If a user truly wants the old behavior of picking the local node, they can specify a nodeID of zero or use the reserved hostname 'self'. Release note (general change): nodelocal:// format URIs now require a nodeID be specified in the hostname field. The special nodeID of 'self' is equivalent to the old behavior when unspecified. Co-authored-by: David Taylor <tinystatemachine@gmail.com>
2 parents ad84601 + 3fc3e14 commit 954fe69

20 files changed

Lines changed: 151 additions & 145 deletions

pkg/ccl/backupccl/backup_test.go

Lines changed: 51 additions & 51 deletions
Original file line numberDiff line numberDiff line change
@@ -69,7 +69,7 @@ const (
6969
multiNode = 3
7070
backupRestoreDefaultRanges = 10
7171
backupRestoreRowPayloadSize = 100
72-
localFoo = "nodelocal:///foo"
72+
localFoo = "nodelocal://0/foo"
7373
)
7474

7575
func backupRestoreTestSetupEmptyWithParams(
@@ -1057,27 +1057,27 @@ func TestBackupRestoreResume(t *testing.T) {
10571057
if err != nil {
10581058
t.Fatal(err)
10591059
}
1060-
backupDir := filepath.Join(dir, "backup")
1060+
backupDir := dir + "/backup"
10611061
if err := os.MkdirAll(backupDir, 0755); err != nil {
10621062
t.Fatal(err)
10631063
}
1064-
checkpointFile := filepath.Join(backupDir, backupccl.BackupManifestCheckpointName)
1064+
checkpointFile := backupDir + "/" + backupccl.BackupManifestCheckpointName
10651065
if err := ioutil.WriteFile(checkpointFile, mockManifest, 0644); err != nil {
10661066
t.Fatal(err)
10671067
}
10681068
createAndWaitForJob(
10691069
t, sqlDB, []sqlbase.ID{backupTableDesc.ID},
10701070
jobspb.BackupDetails{
10711071
EndTime: tc.Servers[0].Clock().Now(),
1072-
URI: "nodelocal:///backup",
1072+
URI: "nodelocal://0/backup",
10731073
BackupManifest: mockManifest,
10741074
},
10751075
jobspb.BackupProgress{},
10761076
)
10771077

10781078
// If the backup properly took the (incorrect) checkpoint into account, it
10791079
// won't have tried to re-export any keys within backupCompletedSpan.
1080-
backupManifestFile := filepath.Join(backupDir, backupccl.BackupManifestName)
1080+
backupManifestFile := backupDir + "/" + backupccl.BackupManifestName
10811081
backupManifestBytes, err := ioutil.ReadFile(backupManifestFile)
10821082
if err != nil {
10831083
t.Fatal(err)
@@ -1095,7 +1095,7 @@ func TestBackupRestoreResume(t *testing.T) {
10951095

10961096
t.Run("restore", func(t *testing.T) {
10971097
sqlDB := sqlutils.MakeSQLRunner(outerDB.DB)
1098-
restoreDir := "nodelocal:///restore"
1098+
restoreDir := "nodelocal://0/restore"
10991099
sqlDB.Exec(t, `BACKUP DATABASE DATA TO $1`, restoreDir)
11001100
sqlDB.Exec(t, `CREATE DATABASE restoredb`)
11011101
restoreDatabaseID := sqlutils.QueryDatabaseID(t, sqlDB.DB, "restoredb")
@@ -1216,7 +1216,7 @@ func TestBackupRestoreControlJob(t *testing.T) {
12161216
sqlDB := sqlutils.MakeSQLRunner(outerDB.DB)
12171217

12181218
t.Run("foreign", func(t *testing.T) {
1219-
foreignDir := "nodelocal:///foreign"
1219+
foreignDir := "nodelocal://0/foreign"
12201220
sqlDB.Exec(t, `CREATE DATABASE orig_fkdb`)
12211221
sqlDB.Exec(t, `CREATE DATABASE restore_fkdb`)
12221222
sqlDB.Exec(t, `CREATE TABLE orig_fkdb.fk (i INT REFERENCES data.bank)`)
@@ -1245,8 +1245,8 @@ func TestBackupRestoreControlJob(t *testing.T) {
12451245
})
12461246

12471247
t.Run("pause", func(t *testing.T) {
1248-
pauseDir := "nodelocal:///pause"
1249-
noOfflineDir := "nodelocal:///no-offline"
1248+
pauseDir := "nodelocal://0/pause"
1249+
noOfflineDir := "nodelocal://0/no-offline"
12501250
sqlDB.Exec(t, `CREATE DATABASE pause`)
12511251

12521252
for i, query := range []string{
@@ -1285,7 +1285,7 @@ func TestBackupRestoreControlJob(t *testing.T) {
12851285
})
12861286

12871287
t.Run("pause-cancel", func(t *testing.T) {
1288-
backupDir := "nodelocal:///backup"
1288+
backupDir := "nodelocal://0/backup"
12891289

12901290
backupJobID, err := jobutils.RunJob(t, sqlDB, &allowResponse, nil, "BACKUP DATABASE data TO $1", backupDir)
12911291
if err != nil {
@@ -1317,7 +1317,7 @@ func TestBackupRestoreControlJob(t *testing.T) {
13171317
})
13181318

13191319
t.Run("cancel", func(t *testing.T) {
1320-
cancelDir := "nodelocal:///cancel"
1320+
cancelDir := "nodelocal://0/cancel"
13211321
sqlDB.Exec(t, `CREATE DATABASE cancel`)
13221322

13231323
for i, query := range []string{
@@ -1360,7 +1360,7 @@ func TestRestoreFailCleanup(t *testing.T) {
13601360
initNone, base.TestClusterArgs{ServerArgs: params})
13611361
defer cleanup()
13621362

1363-
dir = filepath.Join(dir, "foo")
1363+
dir = dir + "/foo"
13641364

13651365
sqlDB.Exec(t, `CREATE DATABASE restore`)
13661366
sqlDB.Exec(t, `BACKUP DATABASE data TO $1`, localFoo)
@@ -1407,7 +1407,7 @@ func TestRestoreFailDatabaseCleanup(t *testing.T) {
14071407
initNone, base.TestClusterArgs{ServerArgs: params})
14081408
defer cleanup()
14091409

1410-
dir = filepath.Join(dir, "foo")
1410+
dir = dir + "/foo"
14111411

14121412
sqlDB.Exec(t, `BACKUP DATABASE data TO $1`, localFoo)
14131413
// Bugger the backup by removing the SST files.
@@ -1902,7 +1902,7 @@ func TestBackupRestoreIncremental(t *testing.T) {
19021902

19031903
checksums = append(checksums, checksumBankPayload(t, sqlDB))
19041904

1905-
backupDir := fmt.Sprintf("nodelocal:///%d", backupNum)
1905+
backupDir := fmt.Sprintf("nodelocal://0/%d", backupNum)
19061906
var from string
19071907
if backupNum > 0 {
19081908
from = fmt.Sprintf(` INCREMENTAL FROM %s`, strings.Join(backupDirs, `,`))
@@ -1918,9 +1918,9 @@ func TestBackupRestoreIncremental(t *testing.T) {
19181918
sqlDB.Exec(t, `INSERT INTO data.bank VALUES (0, -1, 'final')`)
19191919
checksums = append(checksums, checksumBankPayload(t, sqlDB))
19201920
sqlDB.Exec(t, fmt.Sprintf(`BACKUP TABLE data.bank TO '%s' %s`,
1921-
"nodelocal:///final", fmt.Sprintf(` INCREMENTAL FROM %s`, strings.Join(backupDirs, `,`)),
1921+
"nodelocal://0/final", fmt.Sprintf(` INCREMENTAL FROM %s`, strings.Join(backupDirs, `,`)),
19221922
))
1923-
backupDirs = append(backupDirs, `'nodelocal:///final'`)
1923+
backupDirs = append(backupDirs, `'nodelocal://0/final'`)
19241924
}
19251925

19261926
// Start a new cluster to restore into.
@@ -1940,7 +1940,7 @@ func TestBackupRestoreIncremental(t *testing.T) {
19401940
sqlDBRestore.ExpectErr(
19411941
t, fmt.Sprintf("belongs to cluster %s", tc.Servers[0].ClusterID()),
19421942
`BACKUP TABLE data.bank TO $1 INCREMENTAL FROM $2`,
1943-
"nodelocal:///some-other-table", "nodelocal:///0",
1943+
"nodelocal://0/some-other-table", "nodelocal://0/0",
19441944
)
19451945

19461946
for i := len(backupDirs); i > 0; i-- {
@@ -1971,8 +1971,8 @@ func TestBackupRestorePartitionedIncremental(t *testing.T) {
19711971

19721972
// Each incremental backup is written to two different subdirectories in
19731973
// defaultDir and dc1Dir, respectively.
1974-
const defaultDir = "nodelocal:///default"
1975-
const dc1Dir = "nodelocal:///dc=dc1"
1974+
const defaultDir = "nodelocal://0/default"
1975+
const dc1Dir = "nodelocal://0/dc=dc1"
19761976
var defaultBackupDirs []string
19771977
var checksums []uint32
19781978
{
@@ -2157,7 +2157,7 @@ func TestConcurrentBackupRestores(t *testing.T) {
21572157
g.Go(func() error {
21582158
for j := 0; j < numIterations; j++ {
21592159
dbName := fmt.Sprintf("%s_%d", table, j)
2160-
backupDir := fmt.Sprintf("nodelocal:///%s", dbName)
2160+
backupDir := fmt.Sprintf("nodelocal://0/%s", dbName)
21612161
backupQ := fmt.Sprintf(`BACKUP data.%s TO $1`, table)
21622162
if _, err := sqlDB.DB.ExecContext(gCtx, backupQ, backupDir); err != nil {
21632163
return err
@@ -2215,9 +2215,9 @@ func TestBackupAsOfSystemTime(t *testing.T) {
22152215
t.Fatalf("expected %d rows but found %d", expected, rowCount)
22162216
}
22172217

2218-
beforeDir := filepath.Join(localFoo, `beforeTs`)
2218+
beforeDir := localFoo + `/beforeTs`
22192219
sqlDB.Exec(t, fmt.Sprintf(`BACKUP DATABASE data TO '%s' AS OF SYSTEM TIME %s`, beforeDir, beforeTs))
2220-
equalDir := filepath.Join(localFoo, `equalTs`)
2220+
equalDir := localFoo + `/equalTs`
22212221
sqlDB.Exec(t, fmt.Sprintf(`BACKUP DATABASE data TO '%s' AS OF SYSTEM TIME %s`, equalDir, equalTs))
22222222

22232223
sqlDB.Exec(t, `DROP TABLE data.bank`)
@@ -2241,7 +2241,7 @@ func TestRestoreAsOfSystemTime(t *testing.T) {
22412241
const numAccounts = 10
22422242
ctx, _, sqlDB, _, cleanupFn := backupRestoreTestSetup(t, singleNode, numAccounts, initNone)
22432243
defer cleanupFn()
2244-
const dir = "nodelocal:///"
2244+
const dir = "nodelocal://0/"
22452245

22462246
ts := make([]string, 9)
22472247

@@ -2265,8 +2265,8 @@ func TestRestoreAsOfSystemTime(t *testing.T) {
22652265
t.Fatal(err)
22662266
}
22672267

2268-
fullBackup, latestBackup := filepath.Join(dir, "full"), filepath.Join(dir, "latest")
2269-
incBackup, incLatestBackup := filepath.Join(dir, "inc"), filepath.Join(dir, "inc-latest")
2268+
fullBackup, latestBackup := dir+"/full", dir+"/latest"
2269+
incBackup, incLatestBackup := dir+"/inc", dir+"/inc-latest"
22702270
inc2Backup, inc2LatestBackup := incBackup+".2", incLatestBackup+".2"
22712271

22722272
sqlDB.Exec(t,
@@ -2278,7 +2278,7 @@ func TestRestoreAsOfSystemTime(t *testing.T) {
22782278
latestBackup,
22792279
)
22802280

2281-
fullTableBackup := filepath.Join(dir, "tbl")
2281+
fullTableBackup := dir + "/tbl"
22822282
sqlDB.Exec(t,
22832283
fmt.Sprintf(`BACKUP data.bank TO $1 AS OF SYSTEM TIME %s WITH revision_history`, ts[2]),
22842284
fullTableBackup,
@@ -2337,7 +2337,7 @@ func TestRestoreAsOfSystemTime(t *testing.T) {
23372337
inc2LatestBackup, latestBackup, incLatestBackup,
23382338
)
23392339

2340-
incTableBackup := filepath.Join(dir, "inctbl")
2340+
incTableBackup := dir + "/inctbl"
23412341
sqlDB.Exec(t,
23422342
`BACKUP data.bank TO $1 INCREMENTAL FROM $2 WITH revision_history`,
23432343
incTableBackup, fullTableBackup,
@@ -2464,7 +2464,7 @@ func TestRestoreAsOfSystemTimeGCBounds(t *testing.T) {
24642464
const numAccounts = 10
24652465
ctx, tc, sqlDB, _, cleanupFn := backupRestoreTestSetup(t, singleNode, numAccounts, initNone)
24662466
defer cleanupFn()
2467-
const dir = "nodelocal:///"
2467+
const dir = "nodelocal://0/"
24682468
preGC := tree.TimestampToDecimal(tc.Server(0).Clock().Now()).String()
24692469

24702470
gcr := roachpb.GCRequest{
@@ -2483,7 +2483,7 @@ func TestRestoreAsOfSystemTimeGCBounds(t *testing.T) {
24832483

24842484
postGC := tree.TimestampToDecimal(tc.Server(0).Clock().Now()).String()
24852485

2486-
lateFullTableBackup := filepath.Join(dir, "tbl-after-gc")
2486+
lateFullTableBackup := dir + "/tbl-after-gc"
24872487
sqlDB.Exec(t, `BACKUP data.bank TO $1 WITH revision_history`, lateFullTableBackup)
24882488
sqlDB.Exec(t, `DROP TABLE data.bank`)
24892489
sqlDB.ExpectErr(
@@ -2583,10 +2583,10 @@ func TestTimestampMismatch(t *testing.T) {
25832583
sqlDB.Exec(t, `CREATE TABLE data.t2 (a INT PRIMARY KEY)`)
25842584
sqlDB.Exec(t, `INSERT INTO data.t2 VALUES (1)`)
25852585

2586-
fullBackup := filepath.Join(localFoo, "0")
2587-
incrementalT1FromFull := filepath.Join(localFoo, "1")
2588-
incrementalT2FromT1 := filepath.Join(localFoo, "2")
2589-
incrementalT3FromT1OneTable := filepath.Join(localFoo, "3")
2586+
fullBackup := localFoo + "/0"
2587+
incrementalT1FromFull := localFoo + "/1"
2588+
incrementalT2FromT1 := localFoo + "/2"
2589+
incrementalT3FromT1OneTable := localFoo + "/3"
25902590

25912591
sqlDB.Exec(t, `BACKUP DATABASE data TO $1`,
25922592
fullBackup)
@@ -2896,7 +2896,7 @@ func TestBackupRestorePermissions(t *testing.T) {
28962896
sqlDB.Exec(t, "GRANT admin TO testuser")
28972897

28982898
t.Run("backup-table", func(t *testing.T) {
2899-
testLocalFoo := fmt.Sprintf("nodelocal:///%s", t.Name())
2899+
testLocalFoo := fmt.Sprintf("nodelocal://0/%s", t.Name())
29002900
testLocalBackupStmt := fmt.Sprintf(`BACKUP data.bank TO '%s'`, testLocalFoo)
29012901
if _, err := testuser.Exec(testLocalBackupStmt); err != nil {
29022902
t.Fatal(err)
@@ -2908,7 +2908,7 @@ func TestBackupRestorePermissions(t *testing.T) {
29082908
})
29092909

29102910
t.Run("backup-database", func(t *testing.T) {
2911-
testLocalFoo := fmt.Sprintf("nodelocal:///%s", t.Name())
2911+
testLocalFoo := fmt.Sprintf("nodelocal://0/%s", t.Name())
29122912
testLocalBackupStmt := fmt.Sprintf(`BACKUP DATABASE data TO '%s'`, testLocalFoo)
29132913
if _, err := testuser.Exec(testLocalBackupStmt); err != nil {
29142914
t.Fatal(err)
@@ -2940,9 +2940,9 @@ func TestRestoreDatabaseVersusTable(t *testing.T) {
29402940
origDB.Exec(t, q)
29412941
}
29422942

2943-
d4foo := "nodelocal:///d4foo"
2944-
d4foobar := "nodelocal:///d4foobar"
2945-
d4star := "nodelocal:///d4star"
2943+
d4foo := "nodelocal://0/d4foo"
2944+
d4foobar := "nodelocal://0/d4foobar"
2945+
d4star := "nodelocal://0/d4star"
29462946

29472947
origDB.Exec(t, `BACKUP DATABASE data, d2, d3, d4 TO $1`, localFoo)
29482948
origDB.Exec(t, `BACKUP d4.foo TO $1`, d4foo)
@@ -3063,12 +3063,12 @@ func TestPointInTimeRecovery(t *testing.T) {
30633063
_, _, sqlDB, _, cleanupFn := backupRestoreTestSetup(t, singleNode, numAccounts, initNone)
30643064
defer cleanupFn()
30653065

3066-
fullBackupDir := filepath.Join(localFoo, "full")
3066+
fullBackupDir := localFoo + "/full"
30673067
sqlDB.Exec(t, `BACKUP data.* TO $1`, fullBackupDir)
30683068

30693069
sqlDB.Exec(t, `UPDATE data.bank SET balance = 2`)
30703070

3071-
incBackupDir := filepath.Join(localFoo, "inc")
3071+
incBackupDir := localFoo + "/inc"
30723072
sqlDB.Exec(t, `BACKUP data.* TO $1 INCREMENTAL FROM $2`, incBackupDir, fullBackupDir)
30733073

30743074
var beforeBadThingTs string
@@ -3089,7 +3089,7 @@ func TestPointInTimeRecovery(t *testing.T) {
30893089
// RENAME-ing the table into the final location.
30903090
t.Run("recovery=new-backup", func(t *testing.T) {
30913091
sqlDB = sqlutils.MakeSQLRunner(sqlDB.DB)
3092-
recoveryDir := filepath.Join(localFoo, "new-backup")
3092+
recoveryDir := localFoo + "/new-backup"
30933093
sqlDB.Exec(t,
30943094
fmt.Sprintf(`BACKUP data.* TO $1 AS OF SYSTEM TIME '%s'`, beforeBadThingTs),
30953095
recoveryDir,
@@ -3111,7 +3111,7 @@ func TestPointInTimeRecovery(t *testing.T) {
31113111
// using that. Everything else works the same as above.
31123112
t.Run("recovery=inc-backup", func(t *testing.T) {
31133113
sqlDB = sqlutils.MakeSQLRunner(sqlDB.DB)
3114-
recoveryDir := filepath.Join(localFoo, "inc-backup")
3114+
recoveryDir := localFoo + "/inc-backup"
31153115
sqlDB.Exec(t,
31163116
fmt.Sprintf(`BACKUP data.* TO $1 AS OF SYSTEM TIME '%s' INCREMENTAL FROM $2, $3`, beforeBadThingTs),
31173117
recoveryDir, fullBackupDir, incBackupDir,
@@ -3181,7 +3181,7 @@ func TestBackupRestoreIncrementalAddTable(t *testing.T) {
31813181
defer cleanupFn()
31823182
sqlDB.Exec(t, `CREATE DATABASE data2`)
31833183
sqlDB.Exec(t, `CREATE TABLE data.t (s string PRIMARY KEY)`)
3184-
full, inc := filepath.Join(localFoo, "full"), filepath.Join(localFoo, "inc")
3184+
full, inc := localFoo+"/full", localFoo+"/inc"
31853185

31863186
sqlDB.Exec(t, `INSERT INTO data.t VALUES ('before')`)
31873187
sqlDB.Exec(t, `BACKUP data.*, data2.* TO $1`, full)
@@ -3199,7 +3199,7 @@ func TestBackupRestoreIncrementalAddTableMissing(t *testing.T) {
31993199
defer cleanupFn()
32003200
sqlDB.Exec(t, `CREATE DATABASE data2`)
32013201
sqlDB.Exec(t, `CREATE TABLE data.t (s string PRIMARY KEY)`)
3202-
full, inc := filepath.Join(localFoo, "full"), filepath.Join(localFoo, "inc")
3202+
full, inc := localFoo+"/full", localFoo+"/inc"
32033203

32043204
sqlDB.Exec(t, `INSERT INTO data.t VALUES ('before')`)
32053205
sqlDB.Exec(t, `BACKUP data.* TO $1`, full)
@@ -3219,7 +3219,7 @@ func TestBackupRestoreIncrementalTrucateTable(t *testing.T) {
32193219
_, _, sqlDB, _, cleanupFn := backupRestoreTestSetup(t, singleNode, numAccounts, initNone)
32203220
defer cleanupFn()
32213221
sqlDB.Exec(t, `CREATE TABLE data.t (s string PRIMARY KEY)`)
3222-
full, inc := filepath.Join(localFoo, "full"), filepath.Join(localFoo, "inc")
3222+
full, inc := localFoo+"/full", localFoo+"/inc"
32233223

32243224
sqlDB.Exec(t, `INSERT INTO data.t VALUES ('before')`)
32253225
sqlDB.Exec(t, `BACKUP DATABASE data TO $1`, full)
@@ -3236,7 +3236,7 @@ func TestBackupRestoreIncrementalDropTable(t *testing.T) {
32363236
_, _, sqlDB, _, cleanupFn := backupRestoreTestSetup(t, singleNode, numAccounts, initNone)
32373237
defer cleanupFn()
32383238
sqlDB.Exec(t, `CREATE TABLE data.t (s string PRIMARY KEY)`)
3239-
full, inc := filepath.Join(localFoo, "full"), filepath.Join(localFoo, "inc")
3239+
full, inc := localFoo+"/full", localFoo+"/inc"
32403240

32413241
sqlDB.Exec(t, `INSERT INTO data.t VALUES ('before')`)
32423242
sqlDB.Exec(t, `BACKUP DATABASE data TO $1`, full)
@@ -3263,7 +3263,7 @@ func TestFileIOLimits(t *testing.T) {
32633263
_, _, sqlDB, _, cleanupFn := backupRestoreTestSetup(t, singleNode, numAccounts, initNone)
32643264
defer cleanupFn()
32653265

3266-
elsewhere := "nodelocal:///../../blah"
3266+
elsewhere := "nodelocal://0/../../blah"
32673267

32683268
sqlDB.Exec(t, `BACKUP data.bank TO $1`, localFoo)
32693269
sqlDB.ExpectErr(
@@ -3437,8 +3437,8 @@ func TestBackupRestoreShowJob(t *testing.T) {
34373437

34383438
sqlDB.Exec(t, `RESTORE data.bank FROM $1 WITH skip_missing_foreign_keys, into_db = $2`, localFoo, "data 2")
34393439
sqlDB.CheckQueryResults(t, "SELECT description FROM [SHOW JOBS] ORDER BY description", [][]string{
3440-
{"BACKUP DATABASE data TO 'nodelocal:///foo' WITH revision_history"},
3441-
{"RESTORE TABLE data.bank FROM 'nodelocal:///foo' WITH into_db = 'data 2', skip_missing_foreign_keys"},
3440+
{"BACKUP DATABASE data TO 'nodelocal://0/foo' WITH revision_history"},
3441+
{"RESTORE TABLE data.bank FROM 'nodelocal://0/foo' WITH into_db = 'data 2', skip_missing_foreign_keys"},
34423442
})
34433443
}
34443444

@@ -3523,8 +3523,8 @@ func TestBackupRestoreSubsetCreatedStats(t *testing.T) {
35233523
func TestBackupCreatedStatsFromIncrementalBackup(t *testing.T) {
35243524
defer leaktest.AfterTest(t)()
35253525

3526-
const incremental1Foo = "nodelocal:///incremental1foo"
3527-
const incremental2Foo = "nodelocal:///incremental2foo"
3526+
const incremental1Foo = "nodelocal://0/incremental1foo"
3527+
const incremental2Foo = "nodelocal://0/incremental2foo"
35283528
const numAccounts = 1
35293529
_, _, sqlDB, _, cleanupFn := backupRestoreTestSetup(t, singleNode, numAccounts, initNone)
35303530
defer cleanupFn()

pkg/ccl/backupccl/bench_test.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -90,7 +90,7 @@ func BenchmarkClusterRestore(b *testing.B) {
9090
b.SetBytes(backup.Desc.EntryCounts.DataSize / int64(b.N))
9191

9292
b.ResetTimer()
93-
sqlDB.Exec(b, `RESTORE data.* FROM 'nodelocal:///foo'`)
93+
sqlDB.Exec(b, `RESTORE data.* FROM 'nodelocal://0/foo'`)
9494
b.StopTimer()
9595
}
9696

pkg/ccl/backupccl/full_cluster_backup_restore_test.go

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -51,7 +51,7 @@ func TestFullClusterBackup(t *testing.T) {
5151
sqlDB.Exec(t, `ALTER DATABASE data2 CONFIGURE ZONE USING gc.ttlseconds = 900`)
5252
// Populate system.jobs.
5353
// Note: this is not the backup under test, this just serves as a job which should appear in the restore.
54-
sqlDB.Exec(t, `BACKUP data.bank TO 'nodelocal:///throwawayjob'`)
54+
sqlDB.Exec(t, `BACKUP data.bank TO 'nodelocal://0/throwawayjob'`)
5555
preBackupJobs := sqlDB.QueryStr(t, "SELECT * FROM system.jobs")
5656
// Populate system.settings.
5757
sqlDB.Exec(t, `SET CLUSTER SETTING kv.bulk_io_write.concurrent_addsstable_requests = 5`)
@@ -190,7 +190,7 @@ func TestIncrementalFullClusterBackup(t *testing.T) {
190190
defer leaktest.AfterTest(t)()
191191

192192
const numAccounts = 10
193-
const incrementalBackupLocation = "nodelocal:///inc-full-backup"
193+
const incrementalBackupLocation = "nodelocal://0/inc-full-backup"
194194
_, _, sqlDB, tempDir, cleanupFn := backupRestoreTestSetup(t, singleNode, numAccounts, initNone)
195195
_, _, sqlDBRestore, cleanupEmptyCluster := backupRestoreTestSetupEmpty(t, singleNode, tempDir, initNone)
196196
defer cleanupFn()

0 commit comments

Comments
 (0)