Commit 559c838
Storage hdd (#2265)
* WIP: enable using hdd for vms and other workloads
* implement pools policy
* disable hdd for workloads for now
1 parent 3b7358a commit 559c838

6 files changed: +128 -88 lines

pkg/kernel/kernel.go
Lines changed: 4 additions & 0 deletions

@@ -18,6 +18,10 @@ const (
 	VirtualMachine = "zos-debug-vm"
 	// if disable-gpu flag is provided gpu feature will be disabled on that node
 	DisableGPU = "disable-gpu"
+
+	// This allows the node to work without ssd disk. If ssd disk is available
+	// it will still be preferred for workloads. Otherwise fall back on HDD
+	MissingSSD = "missing-ssd"
 )

 // Params represent the parameters passed to the kernel at boot
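
The flag is read back through the existing kernel-params API: pkg/storage/pools.go (below) calls kernel.GetParams().Exists(kernel.MissingSSD). A minimal sketch of that lookup; the standalone main wrapper is illustrative and not part of the commit:

package main

import (
	"fmt"

	"github.com/threefoldtech/zos/pkg/kernel"
)

func main() {
	// kernel params are parsed from the boot command line;
	// booting with `... missing-ssd` sets the flag
	if kernel.GetParams().Exists(kernel.MissingSSD) {
		// the node may run without an ssd; hdd pools become
		// the fallback for workloads and cache
		fmt.Println("missing-ssd set: hdd fallback allowed")
	}
}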

pkg/storage/device.go
Lines changed: 22 additions & 14 deletions

@@ -2,11 +2,13 @@ package storage

 import (
 	"fmt"
+	"slices"

 	"github.com/pkg/errors"
 	"github.com/rs/zerolog/log"
 	"github.com/threefoldtech/zos/pkg"
 	"github.com/threefoldtech/zos/pkg/gridtypes"
+	"github.com/threefoldtech/zos/pkg/storage/filesystem"
 )

 const (
@@ -17,7 +19,7 @@
 func (m *Module) Devices() ([]pkg.Device, error) {
 	var devices []pkg.Device
 	log.Debug().Int("disks", len(m.hdds)).Msg("listing zdb devices")
-	for _, hdd := range m.hdds {
+	for _, hdd := range m.pools(PolicyHDDOnly) {
 		log.Debug().Str("device", hdd.Path()).Msg("checking device")
 		if _, err := hdd.Mounted(); err != nil {
 			log.Debug().Str("device", hdd.Path()).Msg("not mounted")
@@ -29,10 +31,12 @@ func (m *Module) Devices() ([]pkg.Device, error) {
 			log.Error().Err(err).Str("pool", hdd.Name()).Msg("failed to get pool volumes")
 			continue
 		}
+
 		usage, err := hdd.Usage()
 		if err != nil {
 			return nil, err
 		}
+
 		for _, vol := range volumes {
 			log.Debug().Str("volume", vol.Path()).Str("name", vol.Name()).Msg("checking volume")
 			if vol.Name() != zdbVolume {
@@ -56,7 +60,7 @@

 // DeviceLookup looks up device by name
 func (m *Module) DeviceLookup(name string) (pkg.Device, error) {
-	for _, hdd := range m.hdds {
+	for _, hdd := range m.pools(PolicyHDDOnly) {
 		if hdd.Name() != name {
 			continue
 		}
@@ -98,12 +102,7 @@
 // DeviceAllocate allocates a new free device, allocation is done
 // by creation a zdb subvolume
 func (m *Module) DeviceAllocate(min gridtypes.Unit) (pkg.Device, error) {
-	for _, hdd := range m.hdds {
-		if _, err := hdd.Mounted(); err == nil {
-			// mounted pool. skip
-			continue
-		}
-
+	for _, hdd := range m.pools(PolicyHDDOnly) {
 		if hdd.Device().Size < uint64(min) {
 			continue
 		}
@@ -119,20 +118,29 @@ func (m *Module) DeviceAllocate(min gridtypes.Unit) (pkg.Device, error) {
 			continue
 		}

-		if len(volumes) != 0 {
-			log.Info().Str("pool", hdd.Name()).Msg("pool is already used")
+		exists := slices.ContainsFunc(volumes, func(vol filesystem.Volume) bool {
+			return vol.Name() == zdbVolume
+		})
+
+		if exists {
+			// a zdb volume already exists
 			continue
 		}

-		volume, err := hdd.AddVolume(zdbVolume)
+		usage, err := hdd.Usage()
 		if err != nil {
-			log.Error().Err(err).Msg("failed to create zdb volume")
+			return pkg.Device{}, err
+		}
+
+		if usage.Used+uint64(min) > usage.Size {
+			// not enough space
 			continue
 		}

-		usage, err := hdd.Usage()
+		volume, err := hdd.AddVolume(zdbVolume)
 		if err != nil {
-			return pkg.Device{}, err
+			log.Error().Err(err).Msg("failed to create zdb volume")
+			continue
 		}

 		return pkg.Device{
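
Besides switching to pools(PolicyHDDOnly), the allocation logic changes in two ways: a pool is now skipped only when a zdb subvolume already exists on it (not whenever it holds any volume at all), and free space is verified before AddVolume is called. A self-contained sketch of both checks using only the standard library; the vol type stands in for filesystem.Volume and the sizes are made up:

package main

import (
	"fmt"
	"slices"
)

// vol stands in for filesystem.Volume; only Name() matters here.
type vol struct{ name string }

func (v vol) Name() string { return v.name }

const zdbVolume = "zdb"

func main() {
	volumes := []vol{{"cache"}, {"zdb"}}

	// same shape as the commit's check: skip the pool only if a
	// zdb subvolume is already present (Go 1.21+ slices package)
	exists := slices.ContainsFunc(volumes, func(v vol) bool {
		return v.Name() == zdbVolume
	})

	// the capacity check now runs before AddVolume, so a full pool
	// is skipped instead of receiving a volume it cannot host
	var used, size, min uint64 = 900, 1000, 200
	if exists || used+min > size {
		fmt.Println("skip this pool")
		return
	}
	fmt.Println("allocate zdb volume here")
}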

pkg/storage/disk.go
Lines changed: 6 additions & 6 deletions

@@ -25,7 +25,7 @@ const (
 func (s *Module) diskPools() ([]string, error) {

 	var paths []string
-	for _, pool := range s.ssds {
+	for _, pool := range s.pools(PolicySSDFirst) {
 		if _, err := pool.Mounted(); err != nil {
 			continue
 		}
@@ -45,9 +45,9 @@
 	return paths, nil
 }

-// VDiskFindCandidate find a suitbale location for creating a vdisk of the given size
+// VDiskFindCandidate find a suitable location for creating a vdisk of the given size
 func (s *Module) diskFindCandidate(size gridtypes.Unit) (path string, err error) {
-	candidates, err := s.findCandidates(size)
+	candidates, err := s.findCandidates(size, PolicySSDFirst)
 	if err != nil {
 		return path, err
 	}
@@ -78,13 +78,13 @@ func (s *Module) diskFindCandidate(size gridtypes.Unit) (path string, err error)
 }

 func (s *Module) findDisk(id string) (string, error) {
-	pools, err := s.diskPools()
+	vdiskVolumes, err := s.diskPools()
 	if err != nil {
 		return "", errors.Wrapf(err, "failed to find disk with id '%s'", id)
 	}

-	for _, pool := range pools {
-		path, err := s.safePath(pool, id)
+	for _, volumePath := range vdiskVolumes {
+		path, err := s.safePath(volumePath, id)
 		if err != nil {
 			return "", err
 		}
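
findCandidates now takes the policy as a parameter, so each call site picks the device class it is willing to use: the vdisk paths ask for PolicySSDFirst while the zdb paths above use PolicyHDDOnly. A simplified sketch of that pattern; Module, Pool, and findCandidates here are illustrative stand-ins rather than the real zos types:

package main

import "fmt"

// Pool is a simplified stand-in for filesystem.Pool.
type Pool struct {
	Name string
	Free uint64
}

type Module struct {
	ssds, hdds []Pool
}

// Policy mirrors the commit's type: it selects and orders pools.
type Policy func(m *Module) []Pool

func PolicyHDDOnly(m *Module) []Pool { return m.hdds }

// Note: in the commit, the hdd fallback of ssd-first is still
// disabled; this sketch shows the intended behavior.
func PolicySSDFirst(m *Module) []Pool {
	return append(append([]Pool{}, m.ssds...), m.hdds...)
}

// findCandidates filters the pools a policy yields by free space.
func (m *Module) findCandidates(size uint64, policy Policy) []Pool {
	var out []Pool
	for _, p := range policy(m) {
		if p.Free >= size {
			out = append(out, p)
		}
	}
	return out
}

func main() {
	m := &Module{
		ssds: []Pool{{"ssd-0", 100}},
		hdds: []Pool{{"hdd-0", 4000}},
	}
	// a vdisk of size 50 fits on the ssd; size 500 would leave
	// only the hdd as a candidate
	fmt.Println(m.findCandidates(50, PolicySSDFirst))
}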

pkg/storage/pools.go
Lines changed: 61 additions & 0 deletions

@@ -0,0 +1,61 @@
+package storage
+
+import (
+	"slices"
+
+	"github.com/threefoldtech/zos/pkg/kernel"
+	"github.com/threefoldtech/zos/pkg/storage/filesystem"
+)
+
+// utils for pool ordering and presence
+
+// Policy selects and orders the pools available to a caller
+type Policy func(s *Module) []filesystem.Pool
+
+func poolCmp(a, b filesystem.Pool) int {
+	_, errA := a.Mounted()
+	_, errB := b.Mounted()
+
+	// if the two pools have the same mount state, they
+	// compare equal
+	if errA != nil && errB != nil || errA == nil && errB == nil {
+		return 0
+	} else if errA == nil {
+		// mounted pool comes first
+		return -1
+	} else {
+		return 1
+	}
+}
+
+func PolicySSDOnly(s *Module) []filesystem.Pool {
+	slices.SortFunc(s.ssds, poolCmp)
+	return s.ssds
+}
+
+func PolicyHDDOnly(s *Module) []filesystem.Pool {
+	slices.SortFunc(s.hdds, poolCmp)
+	return s.hdds
+}
+
+func PolicySSDFirst(s *Module) []filesystem.Pool {
+	pools := PolicySSDOnly(s)
+
+	// if missing-ssd is set, this policy will also use the hdd
+	// pools for provisioning and cache.
+	//
+	// TODO: this is fully disabled for now, so the ssd-first policy
+	// acts like the ssd-only policy. To enable workloads on hdd,
+	// drop the `false &&` part.
+	if false && kernel.GetParams().Exists(kernel.MissingSSD) {
+		pools = append(pools, PolicyHDDOnly(s)...)
+	}
+
+	return pools
+}
+
+// pools returns the available pools in the order defined by the policy
+func (s *Module) pools(policy Policy) []filesystem.Pool {
+	return policy(s)
+}
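
poolCmp only distinguishes mounted from unmounted, so mounted pools float to the front while the relative order within each group is unspecified: slices.SortFunc is not guaranteed to be stable (slices.SortStableFunc would preserve tie order). A stdlib-only sketch of the comparator's shape, with a plain bool standing in for the result of Mounted():

package main

import (
	"fmt"
	"slices"
)

// pool stands in for filesystem.Pool; mounted replaces the
// err == nil result of Mounted().
type pool struct {
	name    string
	mounted bool
}

// cmp mirrors poolCmp: mounted pools sort first, and pools with
// the same mount state compare equal.
func cmp(a, b pool) int {
	switch {
	case a.mounted == b.mounted:
		return 0
	case a.mounted:
		return -1
	default:
		return 1
	}
}

func main() {
	pools := []pool{{"hdd-1", false}, {"hdd-2", true}, {"hdd-3", false}}
	slices.SortFunc(pools, cmp)
	fmt.Println(pools) // the mounted hdd-2 comes first
}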
