Skip to content

Commit a5f0b96

Browse files
committed
Added support for multiple connections to the NameNode (NN)
1 parent b63e211 commit a5f0b96

12 files changed

+68
-52
lines changed

Dir.go

+8-8
Original file line numberDiff line numberDiff line change
@@ -148,7 +148,7 @@ func (dir *DirINode) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) {
148148
absolutePath := dir.AbsolutePath()
149149
loginfo("Read directory", Fields{Operation: ReadDir, Path: absolutePath})
150150

151-
allAttrs, err := dir.FileSystem.HdfsAccessor.ReadDir(absolutePath)
151+
allAttrs, err := dir.FileSystem.getDFSConnector().ReadDir(absolutePath)
152152
if err != nil {
153153
logwarn("Failed to list DFS directory", Fields{Operation: ReadDir, Path: absolutePath, Error: err})
154154
return nil, err
@@ -203,7 +203,7 @@ func (dir *DirINode) NodeFromAttrs(attrs Attrs) fs.Node {
203203
func (dir *DirINode) LookupAttrs(name string, attrs *Attrs) error {
204204

205205
var err error
206-
*attrs, err = dir.FileSystem.HdfsAccessor.Stat(path.Join(dir.AbsolutePath(), name))
206+
*attrs, err = dir.FileSystem.getDFSConnector().Stat(path.Join(dir.AbsolutePath(), name))
207207
if err != nil {
208208
// It is a warning as each time new file write tries to stat if the file exists
209209
loginfo("Stat failed", Fields{Operation: Stat, Path: path.Join(dir.AbsolutePath(), name), Error: err})
@@ -221,7 +221,7 @@ func (dir *DirINode) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node
221221
dir.mutex.Lock()
222222
defer dir.mutex.Unlock()
223223

224-
err := dir.FileSystem.HdfsAccessor.Mkdir(dir.AbsolutePathForChild(req.Name), req.Mode)
224+
err := dir.FileSystem.getDFSConnector().Mkdir(dir.AbsolutePathForChild(req.Name), req.Mode)
225225
if err != nil {
226226
return nil, err
227227
}
@@ -252,11 +252,11 @@ func (dir *DirINode) Remove(ctx context.Context, req *fuse.RemoveRequest) error
252252

253253
path := dir.AbsolutePathForChild(req.Name)
254254
loginfo("Removing path", Fields{Operation: Remove, Path: path})
255-
err := dir.FileSystem.HdfsAccessor.Remove(path)
255+
err := dir.FileSystem.getDFSConnector().Remove(path)
256256
if err == nil {
257257
dir.EntriesRemove(req.Name)
258258
} else {
259-
logerror("Failed to remove path", Fields{Operation: Remove, Path: path, Error: err})
259+
logwarn("Failed to remove path", Fields{Operation: Remove, Path: path, Error: err})
260260
}
261261
return err
262262
}
@@ -269,7 +269,7 @@ func (dir *DirINode) Rename(ctx context.Context, req *fuse.RenameRequest, newDir
269269
oldPath := dir.AbsolutePathForChild(req.OldName)
270270
newPath := newDir.(*DirINode).AbsolutePathForChild(req.NewName)
271271
loginfo("Renaming to "+newPath, Fields{Operation: Rename, Path: oldPath})
272-
err := dir.FileSystem.HdfsAccessor.Rename(oldPath, newPath)
272+
err := dir.FileSystem.getDFSConnector().Rename(oldPath, newPath)
273273
if err == nil {
274274
// Upon successful rename, updating in-memory representation of the file entry
275275
if node := dir.EntriesGet(req.OldName); node != nil {
@@ -297,7 +297,7 @@ func (dir *DirINode) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp
297297
if req.Valid.Mode() {
298298
loginfo("Setting attributes", Fields{Operation: Chmod, Path: path, Mode: req.Mode})
299299
(func() {
300-
err = dir.FileSystem.HdfsAccessor.Chmod(path, req.Mode)
300+
err = dir.FileSystem.getDFSConnector().Chmod(path, req.Mode)
301301
if err != nil {
302302
return
303303
}
@@ -324,7 +324,7 @@ func (dir *DirINode) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp
324324

325325
loginfo("Setting attributes", Fields{Operation: Chown, Path: path, User: u, UID: owner, GID: group})
326326
(func() {
327-
err = dir.FileSystem.HdfsAccessor.Chown(path, owner, group)
327+
err = dir.FileSystem.getDFSConnector().Chown(path, owner, group)
328328
if err != nil {
329329
return
330330
}

Dir_test.go

+7-7
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,7 @@ func TestAttributeCaching(t *testing.T) {
1717
mockCtrl := gomock.NewController(t)
1818
mockClock := &MockClock{}
1919
hdfsAccessor := NewMockHdfsAccessor(mockCtrl)
20-
fs, _ := NewFileSystem(hdfsAccessor, "/", []string{"*"}, false, false, NewDefaultRetryPolicy(mockClock), mockClock)
20+
fs, _ := NewFileSystem([]HdfsAccessor{hdfsAccessor}, "/", []string{"*"}, false, false, NewDefaultRetryPolicy(mockClock), mockClock)
2121
root, _ := fs.Root()
2222
hdfsAccessor.EXPECT().Stat("/testDir").Return(Attrs{Name: "testDir", Mode: os.ModeDir | 0757}, nil)
2323
dir, err := root.(*DirINode).Lookup(nil, "testDir")
@@ -56,7 +56,7 @@ func TestReadDirWithFiltering(t *testing.T) {
5656
mockCtrl := gomock.NewController(t)
5757
mockClock := &MockClock{}
5858
hdfsAccessor := NewMockHdfsAccessor(mockCtrl)
59-
fs, _ := NewFileSystem(hdfsAccessor, "/", []string{"foo", "bar"}, false, false, NewDefaultRetryPolicy(mockClock), mockClock)
59+
fs, _ := NewFileSystem([]HdfsAccessor{hdfsAccessor}, "/", []string{"foo", "bar"}, false, false, NewDefaultRetryPolicy(mockClock), mockClock)
6060
root, _ := fs.Root()
6161
hdfsAccessor.EXPECT().ReadDir("/").Return([]Attrs{
6262
{Name: "quz", Mode: os.ModeDir},
@@ -77,7 +77,7 @@ func TestReadDirWithZipExpansionDisabled(t *testing.T) {
7777
mockCtrl := gomock.NewController(t)
7878
mockClock := &MockClock{}
7979
hdfsAccessor := NewMockHdfsAccessor(mockCtrl)
80-
fs, _ := NewFileSystem(hdfsAccessor, "/", []string{"*"}, false, false, NewDefaultRetryPolicy(mockClock), mockClock)
80+
fs, _ := NewFileSystem([]HdfsAccessor{hdfsAccessor}, "/", []string{"*"}, false, false, NewDefaultRetryPolicy(mockClock), mockClock)
8181
root, _ := fs.Root()
8282
hdfsAccessor.EXPECT().ReadDir("/").Return([]Attrs{
8383
{Name: "foo.zipx"},
@@ -97,7 +97,7 @@ func TestReadDirWithZipExpansionEnabled(t *testing.T) {
9797
mockCtrl := gomock.NewController(t)
9898
mockClock := &MockClock{}
9999
hdfsAccessor := NewMockHdfsAccessor(mockCtrl)
100-
fs, _ := NewFileSystem(hdfsAccessor, "/", []string{"*"}, true, false, NewDefaultRetryPolicy(mockClock), mockClock)
100+
fs, _ := NewFileSystem([]HdfsAccessor{hdfsAccessor}, "/", []string{"*"}, true, false, NewDefaultRetryPolicy(mockClock), mockClock)
101101
root, _ := fs.Root()
102102
hdfsAccessor.EXPECT().ReadDir("/").Return([]Attrs{
103103
{Name: "foo.zipx"},
@@ -120,7 +120,7 @@ func TestLookupWithFiltering(t *testing.T) {
120120
mockCtrl := gomock.NewController(t)
121121
mockClock := &MockClock{}
122122
hdfsAccessor := NewMockHdfsAccessor(mockCtrl)
123-
fs, _ := NewFileSystem(hdfsAccessor, "/", []string{"foo", "bar"}, false, false, NewDefaultRetryPolicy(mockClock), mockClock)
123+
fs, _ := NewFileSystem([]HdfsAccessor{hdfsAccessor}, "/", []string{"foo", "bar"}, false, false, NewDefaultRetryPolicy(mockClock), mockClock)
124124
root, _ := fs.Root()
125125
hdfsAccessor.EXPECT().Stat("/foo").Return(Attrs{Name: "foo", Mode: os.ModeDir}, nil)
126126
_, err := root.(*DirINode).Lookup(nil, "foo")
@@ -134,7 +134,7 @@ func TestMkdir(t *testing.T) {
134134
mockCtrl := gomock.NewController(t)
135135
mockClock := &MockClock{}
136136
hdfsAccessor := NewMockHdfsAccessor(mockCtrl)
137-
fs, _ := NewFileSystem(hdfsAccessor, "/", []string{"foo", "bar"}, false, false, NewDefaultRetryPolicy(mockClock), mockClock)
137+
fs, _ := NewFileSystem([]HdfsAccessor{hdfsAccessor}, "/", []string{"foo", "bar"}, false, false, NewDefaultRetryPolicy(mockClock), mockClock)
138138
root, _ := fs.Root()
139139
hdfsAccessor.EXPECT().Mkdir("/foo", os.FileMode(0757)|os.ModeDir).Return(nil)
140140
node, err := root.(*DirINode).Mkdir(nil, &fuse.MkdirRequest{Name: "foo", Mode: os.FileMode(0757) | os.ModeDir})
@@ -147,7 +147,7 @@ func TestSetattr(t *testing.T) {
147147
mockCtrl := gomock.NewController(t)
148148
mockClock := &MockClock{}
149149
hdfsAccessor := NewMockHdfsAccessor(mockCtrl)
150-
fs, _ := NewFileSystem(hdfsAccessor, "/", []string{"foo", "bar"}, false, false, NewDefaultRetryPolicy(mockClock), mockClock)
150+
fs, _ := NewFileSystem([]HdfsAccessor{hdfsAccessor}, "/", []string{"foo", "bar"}, false, false, NewDefaultRetryPolicy(mockClock), mockClock)
151151
root, _ := fs.Root()
152152
hdfsAccessor.EXPECT().Mkdir("/foo", os.FileMode(0757)|os.ModeDir).Return(nil)
153153
node, _ := root.(*DirINode).Mkdir(nil, &fuse.MkdirRequest{Name: "foo", Mode: os.FileMode(0757) | os.ModeDir})

File.go

+5-5
Original file line numberDiff line numberDiff line change
@@ -144,7 +144,7 @@ func (file *FileINode) Setattr(ctx context.Context, req *fuse.SetattrRequest, re
144144
if req.Valid.Mode() {
145145
loginfo("Setting attributes", Fields{Operation: Chmod, Path: path, Mode: req.Mode})
146146
(func() {
147-
err = file.FileSystem.HdfsAccessor.Chmod(path, req.Mode)
147+
err = file.FileSystem.getDFSConnector().Chmod(path, req.Mode)
148148
if err != nil {
149149
return
150150
}
@@ -171,7 +171,7 @@ func (file *FileINode) Setattr(ctx context.Context, req *fuse.SetattrRequest, re
171171

172172
loginfo("Setting attributes", Fields{Operation: Chown, Path: path, User: u, UID: owner, GID: group})
173173
(func() {
174-
err = file.FileSystem.HdfsAccessor.Chown(path, fmt.Sprint(req.Uid), fmt.Sprint(req.Gid))
174+
err = file.FileSystem.getDFSConnector().Chown(path, fmt.Sprint(req.Uid), fmt.Sprint(req.Gid))
175175
if err != nil {
176176
return
177177
}
@@ -199,7 +199,7 @@ func (file *FileINode) createStagingFile(operation string, existsInDFS bool) (*o
199199

200200
//create staging file
201201
absPath := file.AbsolutePath()
202-
hdfsAccessor := file.FileSystem.HdfsAccessor
202+
hdfsAccessor := file.FileSystem.getDFSConnector()
203203
if !existsInDFS { // it is a new file so create it in the DFS
204204
w, err := hdfsAccessor.CreateFile(absPath, file.Attrs.Mode, false)
205205
if err != nil {
@@ -234,7 +234,7 @@ func (file *FileINode) createStagingFile(operation string, existsInDFS bool) (*o
234234
}
235235

236236
func (file *FileINode) downloadToStaging(stagingFile *os.File, operation string) error {
237-
hdfsAccessor := file.FileSystem.HdfsAccessor
237+
hdfsAccessor := file.FileSystem.getDFSConnector()
238238
absPath := file.AbsolutePath()
239239

240240
reader, err := hdfsAccessor.OpenRead(absPath)
@@ -288,7 +288,7 @@ func (file *FileINode) NewFileHandle(existsInDFS bool, flags fuse.OpenFlags) (*F
288288
// then we upgrade the handle. However, if the file is already opened in
289289
// in RW state then we use the existing RW handle
290290
// if file.handle
291-
reader, _ := file.FileSystem.HdfsAccessor.OpenRead(file.AbsolutePath())
291+
reader, _ := file.FileSystem.getDFSConnector().OpenRead(file.AbsolutePath())
292292
fh.File.handle = &RemoteROFileProxy{hdfsReader: reader, file: file}
293293
loginfo("Opened file, RO handle", fh.logInfo(Fields{Operation: operation, Flags: fh.fileFlags}))
294294
}

FileHandleWriter_test.go

+6-5
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@ func TestReadWriteFile(t *testing.T) {
2020
mockClock := &MockClock{}
2121
hdfsAccessor := NewMockHdfsAccessor(mockCtrl)
2222
fileName := "/testWriteFile_1"
23-
fs, _ := NewFileSystem(hdfsAccessor, "/", []string{"*"}, false, false, NewDefaultRetryPolicy(mockClock), mockClock)
23+
fs, _ := NewFileSystem([]HdfsAccessor{hdfsAccessor}, "/", []string{"*"}, false, false, NewDefaultRetryPolicy(mockClock), mockClock)
2424

2525
hdfswriter := NewMockHdfsWriter(mockCtrl)
2626

@@ -59,7 +59,7 @@ func TestFaultTolerantWriteFile(t *testing.T) {
5959
mockClock := &MockClock{}
6060
hdfsAccessor := NewMockHdfsAccessor(mockCtrl)
6161
fileName := "/testWriteFile_1"
62-
fs, _ := NewFileSystem(hdfsAccessor, "/", []string{"*"}, false, false, NewDefaultRetryPolicy(mockClock), mockClock)
62+
fs, _ := NewFileSystem([]HdfsAccessor{hdfsAccessor}, "/", []string{"*"}, false, false, NewDefaultRetryPolicy(mockClock), mockClock)
6363

6464
hdfswriter := NewMockHdfsWriter(mockCtrl)
6565

@@ -138,15 +138,16 @@ func TestFlushFile(t *testing.T) {
138138
hdfsAccessor.EXPECT().StatFs().Return(FsInfo{capacity: uint64(100), used: uint64(20), remaining: uint64(80)}, nil).AnyTimes()
139139
hdfsAccessor.EXPECT().Stat("/testWriteFile_2").Return(Attrs{Name: "testWriteFile_2"}, nil)
140140
fileName := "/testWriteFile_2"
141-
fs, _ := NewFileSystem(hdfsAccessor, "/", []string{"*"}, false, false, NewDefaultRetryPolicy(mockClock), mockClock)
141+
fs, _ := NewFileSystem([]HdfsAccessor{hdfsAccessor}, "/", []string{"*"}, false, false, NewDefaultRetryPolicy(mockClock), mockClock)
142142

143143
hdfsAccessor.EXPECT().Remove(fileName).Return(nil).AnyTimes()
144-
hdfsAccessor.EXPECT().CreateFile(fileName, os.FileMode(0757), false).Return(hdfswriter, nil).AnyTimes()
144+
hdfsAccessor.EXPECT().CreateFile(fileName, os.FileMode(0757), true).Return(hdfswriter, nil).AnyTimes()
145145
hdfswriter.EXPECT().Close().Return(nil).AnyTimes()
146+
hdfswriter.EXPECT().Write([]byte("hello world")).Return(0, nil).AnyTimes()
146147

147148
// Test for newfilehandlewriter with existing file
148149
root, _ := fs.Root()
149-
file := root.(*DirINode).NodeFromAttrs(Attrs{Name: "testWriteFile_2"}).(*FileINode)
150+
file := root.(*DirINode).NodeFromAttrs(Attrs{Name: "testWriteFile_2", Mode: os.FileMode(0757)}).(*FileINode)
150151
fh, _ := file.Open(nil, &fuse.OpenRequest{}, &fuse.OpenResponse{})
151152
fileHandle := fh.(*FileHandle)
152153

FileSystem.go

+20-12
Original file line numberDiff line numberDiff line change
@@ -20,15 +20,16 @@ import (
2020
)
2121

2222
type FileSystem struct {
23-
HdfsAccessor HdfsAccessor // Interface to access HDFS
24-
SrcDir string // Src directory that will be mounted
25-
AllowedPrefixes []string // List of allowed path prefixes (only those prefixes are exposed via mountpoint)
26-
ExpandZips bool // Indicates whether ZIP expansion feature is enabled
27-
ReadOnly bool // Indicates whether mount filesystem with readonly
28-
Mounted bool // True if filesystem is mounted
29-
RetryPolicy *RetryPolicy // Retry policy
30-
Clock Clock // interface to get wall clock time
31-
FsInfo FsInfo // Usage of HDFS, including capacity, remaining, used sizes.
23+
HdfsAccessors []HdfsAccessor // Interface to access HDFS
24+
hdfsAccessorsIndex int
25+
SrcDir string // Src directory that will be mounted
26+
AllowedPrefixes []string // List of allowed path prefixes (only those prefixes are exposed via mountpoint)
27+
ExpandZips bool // Indicates whether ZIP expansion feature is enabled
28+
ReadOnly bool // Indicates whether mount filesystem with readonly
29+
Mounted bool // True if filesystem is mounted
30+
RetryPolicy *RetryPolicy // Retry policy
31+
Clock Clock // interface to get wall clock time
32+
FsInfo FsInfo // Usage of HDFS, including capacity, remaining, used sizes.
3233

3334
closeOnUnmount []io.Closer // list of opened files (zip archives) to be closed on unmount
3435
closeOnUnmountLock sync.Mutex // mutex to protect closeOnUnmount
@@ -39,9 +40,9 @@ var _ fs.FS = (*FileSystem)(nil)
3940
var _ fs.FSStatfser = (*FileSystem)(nil)
4041

4142
// Creates an instance of mountable file system
42-
func NewFileSystem(hdfsAccessor HdfsAccessor, srcDir string, allowedPrefixes []string, expandZips bool, readOnly bool, retryPolicy *RetryPolicy, clock Clock) (*FileSystem, error) {
43+
func NewFileSystem(hdfsAccessors []HdfsAccessor, srcDir string, allowedPrefixes []string, expandZips bool, readOnly bool, retryPolicy *RetryPolicy, clock Clock) (*FileSystem, error) {
4344
return &FileSystem{
44-
HdfsAccessor: hdfsAccessor,
45+
HdfsAccessors: hdfsAccessors,
4546
Mounted: false,
4647
AllowedPrefixes: allowedPrefixes,
4748
ExpandZips: expandZips,
@@ -132,7 +133,7 @@ func (filesystem *FileSystem) CloseOnUnmount(file io.Closer) {
132133
// Statfs is called to obtain file system metadata.
133134
// It should write that data to resp.
134135
func (filesystem *FileSystem) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse.StatfsResponse) error {
135-
fsInfo, err := filesystem.HdfsAccessor.StatFs()
136+
fsInfo, err := filesystem.getDFSConnector().StatFs()
136137
if err != nil {
137138
logwarn("Stat DFS failed", Fields{Operation: StatFS, Error: err})
138139
return err
@@ -143,3 +144,10 @@ func (filesystem *FileSystem) Statfs(ctx context.Context, req *fuse.StatfsReques
143144
resp.Blocks = fsInfo.capacity / uint64(resp.Bsize)
144145
return nil
145146
}
147+
148+
func (filesystem *FileSystem) getDFSConnector() HdfsAccessor {
149+
filesystem.hdfsAccessorsIndex = filesystem.hdfsAccessorsIndex + 1
150+
index := filesystem.hdfsAccessorsIndex % len(filesystem.HdfsAccessors)
151+
loginfo(fmt.Sprintf("Client index %d. len %d", index, len(filesystem.HdfsAccessors)), nil)
152+
return filesystem.HdfsAccessors[index]
153+
}

FileSystemOperations_test.go

+1-1
Original file line numberDiff line numberDiff line change
@@ -181,7 +181,7 @@ func withMount(t testing.TB, srcDir string, fn func(mntPath string, hdfsAccessor
181181
ftHdfsAccessor := NewFaultTolerantHdfsAccessor(hdfsAccessor, retryPolicy)
182182

183183
// Creating the virtual file system
184-
fileSystem, err := NewFileSystem(ftHdfsAccessor, srcDir, []string{"*"}, false, false, retryPolicy, WallClock{})
184+
fileSystem, err := NewFileSystem([]HdfsAccessor{ftHdfsAccessor}, srcDir, []string{"*"}, false, false, retryPolicy, WallClock{})
185185
if err != nil {
186186
t.Fatalf(fmt.Sprintf("Error/NewFileSystem: %v ", err), nil)
187187
}

FileSystem_test.go

+1-1
Original file line numberDiff line numberDiff line change
@@ -33,7 +33,7 @@ func TestStatfs(t *testing.T) {
3333
mockCtrl := gomock.NewController(t)
3434
mockClock := &MockClock{}
3535
hdfsAccessor := NewMockHdfsAccessor(mockCtrl)
36-
fs, _ := NewFileSystem(hdfsAccessor, "/", []string{"*"}, false, false, NewDefaultRetryPolicy(mockClock), mockClock)
36+
fs, _ := NewFileSystem([]HdfsAccessor{hdfsAccessor}, "/", []string{"*"}, false, false, NewDefaultRetryPolicy(mockClock), mockClock)
3737

3838
hdfsAccessor.EXPECT().StatFs().Return(FsInfo{capacity: uint64(10240), remaining: uint64(1024)}, nil)
3939
fsInfo := &fuse.StatfsResponse{}

HopsFileHandle.go

+2-2
Original file line numberDiff line numberDiff line change
@@ -111,13 +111,13 @@ func (fh *FileHandle) copyToDFS(operation string) error {
111111
return err
112112
}
113113
// Reconnect and try again
114-
fh.File.FileSystem.HdfsAccessor.Close()
114+
fh.File.FileSystem.getDFSConnector().Close()
115115
logwarn("Failed to copy file to DFS", fh.logInfo(Fields{Operation: operation}))
116116
}
117117
}
118118

119119
func (fh *FileHandle) FlushAttempt(operation string) error {
120-
hdfsAccessor := fh.File.FileSystem.HdfsAccessor
120+
hdfsAccessor := fh.File.FileSystem.getDFSConnector()
121121
w, err := hdfsAccessor.CreateFile(fh.File.AbsolutePath(), fh.File.Attrs.Mode, true)
122122
if err != nil {
123123
logerror("Error creating file in DFS", fh.logInfo(Fields{Operation: operation, Error: err}))

Log.go

+1-1
Original file line numberDiff line numberDiff line change
@@ -66,7 +66,7 @@ const (
6666
var ReportCaller = true
6767

6868
func init() {
69-
initLogger("trace", false, "")
69+
initLogger("fatal", false, "")
7070
}
7171

7272
func initLogger(l string, reportCaller bool, lfile string) {

Zip_test.go

+1-1
Original file line numberDiff line numberDiff line change
@@ -43,7 +43,7 @@ func TestZipDirReadArchive(t *testing.T) {
4343
mockCtrl := gomock.NewController(t)
4444
mockClock := &MockClock{}
4545
hdfsAccessor := NewMockHdfsAccessor(mockCtrl)
46-
fs, _ := NewFileSystem(hdfsAccessor, "/", []string{"*"}, true, false, NewDefaultRetryPolicy(mockClock), mockClock)
46+
fs, _ := NewFileSystem([]HdfsAccessor{hdfsAccessor}, "/", []string{"*"}, true, false, NewDefaultRetryPolicy(mockClock), mockClock)
4747
zipFile, err := os.Open(testZipPath())
4848
assert.Nil(t, err)
4949
zipFileInfo, err := zipFile.Stat()

go.mod

+2-2
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@ require (
1414
gopkg.in/natefinch/lumberjack.v2 v2.0.0
1515
)
1616

17-
replace github.com/colinmarc/hdfs/v2 v2.2.0 => github.com/logicalclocks/hopsfs-go-client/v2 v2.4.2
17+
//replace github.com/colinmarc/hdfs/v2 v2.2.0 => github.com/logicalclocks/hopsfs-go-client/v2 v2.4.2
1818

19-
//replace github.com/colinmarc/hdfs/v2 v2.2.0 => /home/salman/code/hops/hopsfs-go/hopsfs-go-client
19+
replace github.com/colinmarc/hdfs/v2 v2.2.0 => /home/salman/code/hops/hopsfs-go/hopsfs-go-client
2020
//replace bazil.org/fuse v0.0.0-20200524192727-fb710f7dfd05 => /home/salman/code/hops/hopsfs-go/fuse

0 commit comments

Comments
 (0)