Skip to content
This repository was archived by the owner on Sep 11, 2020. It is now read-only.

filesystem: ObjectStorage, MaxOpenDescriptors option #1123

Merged
merged 1 commit into from
Apr 22, 2019
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions plumbing/format/packfile/packfile.go
Original file line number Diff line number Diff line change
Expand Up @@ -414,6 +414,11 @@ func (p *Packfile) ID() (plumbing.Hash, error) {
return hash, nil
}

// Scanner returns the packfile's underlying Scanner, giving callers direct
// access to the stream the Packfile reads from.
func (p *Packfile) Scanner() *Scanner {
	return p.s
}

// Close the packfile and its resources.
func (p *Packfile) Close() error {
closer, ok := p.file.(io.Closer)
Expand Down
20 changes: 11 additions & 9 deletions storage/filesystem/dotgit/dotgit.go
Original file line number Diff line number Diff line change
Expand Up @@ -83,7 +83,7 @@ type DotGit struct {
packList []plumbing.Hash
packMap map[plumbing.Hash]struct{}

files map[string]billy.File
files map[plumbing.Hash]billy.File
}

// New returns a DotGit value ready to be used. The path argument must
Expand Down Expand Up @@ -245,8 +245,15 @@ func (d *DotGit) objectPackPath(hash plumbing.Hash, extension string) string {
}

func (d *DotGit) objectPackOpen(hash plumbing.Hash, extension string) (billy.File, error) {
if d.files == nil {
d.files = make(map[string]billy.File)
if d.options.KeepDescriptors && extension == "pack" {
if d.files == nil {
d.files = make(map[plumbing.Hash]billy.File)
}

f, ok := d.files[hash]
if ok {
return f, nil
}
}

err := d.hasPack(hash)
Expand All @@ -255,11 +262,6 @@ func (d *DotGit) objectPackOpen(hash plumbing.Hash, extension string) (billy.Fil
}

path := d.objectPackPath(hash, extension)
f, ok := d.files[path]
if ok {
return f, nil
}

pack, err := d.fs.Open(path)
if err != nil {
if os.IsNotExist(err) {
Expand All @@ -270,7 +272,7 @@ func (d *DotGit) objectPackOpen(hash plumbing.Hash, extension string) (billy.Fil
}

if d.options.KeepDescriptors && extension == "pack" {
d.files[path] = pack
d.files[hash] = pack
}

return pack, nil
Expand Down
145 changes: 106 additions & 39 deletions storage/filesystem/object.go
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,10 @@ type ObjectStorage struct {

dir *dotgit.DotGit
index map[plumbing.Hash]idxfile.Index

packList []plumbing.Hash
packListIdx int
packfiles map[plumbing.Hash]*packfile.Packfile
}

// NewObjectStorage creates a new ObjectStorage with the given .git directory and cache.
Expand Down Expand Up @@ -187,6 +191,73 @@ func (s *ObjectStorage) encodedObjectSizeFromUnpacked(h plumbing.Hash) (
return size, err
}

// packfile returns an open Packfile for the given pack hash, reusing a
// cached one when descriptor caching is enabled. idx is the pack's index,
// used by the Packfile to locate objects by hash or offset.
func (s *ObjectStorage) packfile(idx idxfile.Index, pack plumbing.Hash) (*packfile.Packfile, error) {
	if p := s.packfileFromCache(pack); p != nil {
		return p, nil
	}

	f, err := s.dir.ObjectPack(pack)
	if err != nil {
		return nil, err
	}

	var p *packfile.Packfile
	if s.objectCache != nil {
		p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.objectCache)
	} else {
		p = packfile.NewPackfile(idx, s.dir.Fs(), f)
	}

	if err := s.storePackfileInCache(pack, p); err != nil {
		// Storing failed (closing an evicted packfile errored). Close the
		// packfile we just opened so its file descriptor does not leak.
		_ = p.Close()
		return nil, err
	}

	return p, nil
}

// packfileFromCache returns the cached open Packfile for hash, or nil when
// none is cached. On first use it lazily allocates the cache structures
// that match the configured descriptor-keeping options.
func (s *ObjectStorage) packfileFromCache(hash plumbing.Hash) *packfile.Packfile {
	if s.packfiles == nil {
		switch {
		case s.options.KeepDescriptors:
			// Unbounded cache: every opened packfile stays open.
			s.packfiles = make(map[plumbing.Hash]*packfile.Packfile)
		case s.options.MaxOpenDescriptors > 0:
			// Bounded cache: a fixed ring of descriptor slots.
			limit := s.options.MaxOpenDescriptors
			s.packList = make([]plumbing.Hash, limit)
			s.packfiles = make(map[plumbing.Hash]*packfile.Packfile, limit)
		}
	}

	// Lookup on a nil map safely yields the zero value (nil pointer).
	return s.packfiles[hash]
}

// storePackfileInCache records an open Packfile according to the storage
// options: with KeepDescriptors everything is kept open; with
// MaxOpenDescriptors > 0 a bounded ring of slots is reused, closing the
// packfile being evicted from a slot. With neither option it is a no-op.
func (s *ObjectStorage) storePackfileInCache(hash plumbing.Hash, p *packfile.Packfile) error {
	if s.options.KeepDescriptors {
		s.packfiles[hash] = p
		return nil
	}

	if s.options.MaxOpenDescriptors <= 0 {
		return nil
	}

	// Wrap the slot index once the end of the ring is reached.
	if s.packListIdx >= len(s.packList) {
		s.packListIdx = 0
	}

	// Evict whatever currently occupies the slot, closing it if still open.
	if victim := s.packList[s.packListIdx]; !victim.IsZero() {
		evicted := s.packfiles[victim]
		delete(s.packfiles, victim)
		if evicted != nil {
			if err := evicted.Close(); err != nil {
				return err
			}
		}
	}

	// Claim the freed slot for the newly opened packfile.
	s.packList[s.packListIdx] = hash
	s.packfiles[hash] = p
	s.packListIdx++

	return nil
}

func (s *ObjectStorage) encodedObjectSizeFromPackfile(h plumbing.Hash) (
size int64, err error) {
if err := s.requireIndex(); err != nil {
Expand All @@ -198,12 +269,6 @@ func (s *ObjectStorage) encodedObjectSizeFromPackfile(h plumbing.Hash) (
return 0, plumbing.ErrObjectNotFound
}

f, err := s.dir.ObjectPack(pack)
if err != nil {
return 0, err
}
defer ioutil.CheckClose(f, &err)

idx := s.index[pack]
hash, err := idx.FindHash(offset)
if err == nil {
Expand All @@ -215,11 +280,13 @@ func (s *ObjectStorage) encodedObjectSizeFromPackfile(h plumbing.Hash) (
return 0, err
}

var p *packfile.Packfile
if s.objectCache != nil {
p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.objectCache)
} else {
p = packfile.NewPackfile(idx, s.dir.Fs(), f)
p, err := s.packfile(idx, pack)
if err != nil {
return 0, err
}

if !s.options.KeepDescriptors && s.options.MaxOpenDescriptors == 0 {
defer ioutil.CheckClose(p, &err)
}

return p.GetSizeByOffset(offset)
Expand Down Expand Up @@ -361,29 +428,28 @@ func (s *ObjectStorage) getFromPackfile(h plumbing.Hash, canBeDelta bool) (
return nil, plumbing.ErrObjectNotFound
}

f, err := s.dir.ObjectPack(pack)
idx := s.index[pack]
p, err := s.packfile(idx, pack)
if err != nil {
return nil, err
}

if !s.options.KeepDescriptors {
defer ioutil.CheckClose(f, &err)
if !s.options.KeepDescriptors && s.options.MaxOpenDescriptors == 0 {
defer ioutil.CheckClose(p, &err)
}

idx := s.index[pack]
if canBeDelta {
return s.decodeDeltaObjectAt(f, idx, offset, hash)
return s.decodeDeltaObjectAt(p, offset, hash)
}

return s.decodeObjectAt(f, idx, offset)
return s.decodeObjectAt(p, offset)
}

func (s *ObjectStorage) decodeObjectAt(
f billy.File,
idx idxfile.Index,
p *packfile.Packfile,
offset int64,
) (plumbing.EncodedObject, error) {
hash, err := idx.FindHash(offset)
hash, err := p.FindHash(offset)
if err == nil {
obj, ok := s.objectCache.Get(hash)
if ok {
Expand All @@ -395,28 +461,16 @@ func (s *ObjectStorage) decodeObjectAt(
return nil, err
}

var p *packfile.Packfile
if s.objectCache != nil {
p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.objectCache)
} else {
p = packfile.NewPackfile(idx, s.dir.Fs(), f)
}

return p.GetByOffset(offset)
}

func (s *ObjectStorage) decodeDeltaObjectAt(
f billy.File,
idx idxfile.Index,
p *packfile.Packfile,
offset int64,
hash plumbing.Hash,
) (plumbing.EncodedObject, error) {
if _, err := f.Seek(0, io.SeekStart); err != nil {
return nil, err
}

p := packfile.NewScanner(f)
header, err := p.SeekObjectHeader(offset)
scan := p.Scanner()
header, err := scan.SeekObjectHeader(offset)
if err != nil {
return nil, err
}
Expand All @@ -429,12 +483,12 @@ func (s *ObjectStorage) decodeDeltaObjectAt(
case plumbing.REFDeltaObject:
base = header.Reference
case plumbing.OFSDeltaObject:
base, err = idx.FindHash(header.OffsetReference)
base, err = p.FindHash(header.OffsetReference)
if err != nil {
return nil, err
}
default:
return s.decodeObjectAt(f, idx, offset)
return s.decodeObjectAt(p, offset)
}

obj := &plumbing.MemoryObject{}
Expand All @@ -444,7 +498,7 @@ func (s *ObjectStorage) decodeDeltaObjectAt(
return nil, err
}

if _, _, err := p.NextObject(w); err != nil {
if _, _, err := scan.NextObject(w); err != nil {
return nil, err
}

Expand Down Expand Up @@ -515,7 +569,20 @@ func (s *ObjectStorage) buildPackfileIters(

// Close closes all cached packfiles and the underlying DotGit directory,
// returning the first error encountered. All packfiles are closed even when
// an earlier Close fails.
func (s *ObjectStorage) Close() error {
	var firstError error
	if s.options.KeepDescriptors || s.options.MaxOpenDescriptors > 0 {
		// Note: avoid naming the loop variable "packfile", which would
		// shadow the packfile package.
		for _, p := range s.packfiles {
			if err := p.Close(); firstError == nil && err != nil {
				firstError = err
			}
		}
	}

	s.packfiles = nil

	// Do not drop the error from closing the DotGit directory.
	if err := s.dir.Close(); firstError == nil && err != nil {
		firstError = err
	}

	return firstError
}

type lazyPackfilesIter struct {
Expand Down
18 changes: 18 additions & 0 deletions storage/filesystem/object_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -86,6 +86,24 @@ func (s *FsSuite) TestGetFromPackfileKeepDescriptors(c *C) {
})
}

// TestGetFromPackfileMaxOpenDescriptors fetches objects from two different
// packfiles with MaxOpenDescriptors set to 1, forcing the descriptor ring to
// evict between lookups, then verifies Close succeeds.
func (s *FsSuite) TestGetFromPackfileMaxOpenDescriptors(c *C) {
	fs := fixtures.ByTag(".git").ByTag("multi-packfile").One().DotGit()
	o := NewObjectStorageWithOptions(dotgit.New(fs), cache.NewObjectLRUDefault(), Options{MaxOpenDescriptors: 1})

	hashes := []plumbing.Hash{
		plumbing.NewHash("8d45a34641d73851e01d3754320b33bb5be3c4d3"),
		plumbing.NewHash("e9cfa4c9ca160546efd7e8582ec77952a27b17db"),
	}
	for _, want := range hashes {
		obj, err := o.getFromPackfile(want, false)
		c.Assert(err, IsNil)
		c.Assert(obj.Hash(), Equals, want)
	}

	c.Assert(o.Close(), IsNil)
}

func (s *FsSuite) TestGetSizeOfObjectFile(c *C) {
fs := fixtures.ByTag(".git").ByTag("unpacked").One().DotGit()
o := NewObjectStorage(dotgit.New(fs), cache.NewObjectLRUDefault())
Expand Down
4 changes: 3 additions & 1 deletion storage/filesystem/storage.go
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,9 @@ type Options struct {
// KeepDescriptors makes the file descriptors to be reused but they will
// need to be manually closed calling Close().
KeepDescriptors bool
// MaxOpenDescriptors is the max number of file descriptors to keep
// open. If KeepDescriptors is true, all file descriptors will remain open.
MaxOpenDescriptors int
}

// NewStorage returns a new Storage backed by a given `fs.Filesystem` and cache.
Expand All @@ -43,7 +46,6 @@ func NewStorage(fs billy.Filesystem, cache cache.Object) *Storage {
func NewStorageWithOptions(fs billy.Filesystem, cache cache.Object, ops Options) *Storage {
dirOps := dotgit.Options{
ExclusiveAccess: ops.ExclusiveAccess,
KeepDescriptors: ops.KeepDescriptors,
}
dir := dotgit.NewWithOptions(fs, dirOps)

Expand Down