fix dep, impl nonempty

Barak Michener 2018-03-30 19:36:37 -07:00
parent c8c18403e4
commit a5d237df2a
24 changed files with 286 additions and 215 deletions

Gopkg.lock generated

@ -8,6 +8,7 @@
version = "v1.2.0"
[[projects]]
branch = "master"
name = "github.com/hanwen/go-fuse"
packages = [
"fuse",
@ -15,8 +16,7 @@
"fuse/pathfs",
"splice"
]
revision = "5690be47d614355a22931c129e1075c25a62e9ac"
version = "v20170619"
revision = "a9ddcb8a4b609500fc59c89ccc9ee05f00a5fefd"
[[projects]]
name = "github.com/inconshreveable/mousetrap"
@ -60,6 +60,6 @@
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "b608e0349a7dfcc18e5882f75da5d116cf8986b1bc9d87d9b2808093698cd439"
inputs-digest = "9bee1a4379bcd4b9176976a3cedac4a5c846c42ebdc54c623d3fb551c6921aa9"
solver-name = "gps-cdcl"
solver-version = 1


@ -28,3 +28,7 @@
[prune]
go-tests = true
unused-packages = true
[[constraint]]
name = "github.com/hanwen/go-fuse"
branch = "master"


@ -10,11 +10,15 @@ import (
var (
hostport string
nonempty bool
readonly bool
)
func init() {
rootCmd.AddCommand(serveCmd)
serveCmd.PersistentFlags().StringVar(&hostport, "listen", "0.0.0.0:8889", "Host and port to listen on")
serveCmd.PersistentFlags().BoolVar(&nonempty, "nonempty", false, "Allow mounting over a nonempty directory (default behavior in libfuse 3.x)")
serveCmd.PersistentFlags().BoolVar(&readonly, "readonly", false, "Mount read-only")
}
var serveCmd = &cobra.Command{
@ -29,5 +33,9 @@ func serveRun(cmd *cobra.Command, args []string) {
os.Exit(1)
}
dir := args[0]
kubelwagen.Serve(hostport, dir)
opts := &kubelwagen.WsFsOpts{
ReadOnly: readonly,
NonEmpty: nonempty,
}
kubelwagen.Serve(hostport, dir, opts)
}
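
For context, a minimal sketch of how the new options flow into the library when called from code rather than the CLI. Only the Serve signature and the WsFsOpts fields come from this diff; the import path and the mount directory are placeholders:

package main

import "example.com/kubelwagen" // hypothetical import path

func main() {
	opts := &kubelwagen.WsFsOpts{
		NonEmpty: true,  // allow mounting over a non-empty directory
		ReadOnly: false, // keep the workspace writable
	}
	// Serve now takes the options struct assembled from the CLI flags.
	kubelwagen.Serve("0.0.0.0:8889", "/mnt/workspace", opts)
}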


@ -13,6 +13,7 @@ type WsFs struct {
type WsFsOpts struct {
ReadOnly bool
NonEmpty bool
}
func NewWsFs(opts WsFsOpts, req chan RequestCallback, closer chan bool) *pathfs.PathNodeFs {

fuse.go

@ -1,16 +1,18 @@
package kubelwagen
import (
"github.com/hanwen/go-fuse/fuse"
"github.com/hanwen/go-fuse/fuse/nodefs"
"github.com/sirupsen/logrus"
)
func serveFuse(dir string, req chan RequestCallback, closer chan bool) error {
opts := WsFsOpts{
ReadOnly: true,
}
func serveFuse(dir string, req chan RequestCallback, opts WsFsOpts, closer chan bool) error {
nfs := NewWsFs(opts, req, closer)
server, _, err := nodefs.MountRoot(dir, nfs.Root(), nil)
mountOpts := mkMountOpts(opts)
conn := nodefs.NewFileSystemConnector(nfs.Root(), &nodefs.Options{})
server, err := fuse.NewServer(conn.RawFS(), dir, mountOpts)
if err != nil {
logrus.Fatalln("cannot mount:", err)
}
@ -19,3 +21,14 @@ func serveFuse(dir string, req chan RequestCallback, closer chan bool) error {
<-closer
return nil
}
func mkMountOpts(opts WsFsOpts) *fuse.MountOptions {
var fusermountopts []string
if opts.NonEmpty {
fusermountopts = append(fusermountopts, "nonempty")
}
mountOpts := &fuse.MountOptions{
Options: fusermountopts,
}
return mountOpts
}
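
mkMountOpts only translates the boolean into a raw option string; on Linux, go-fuse joins MountOptions.Options with commas and hands them to fusermount as a single -o argument, so NonEmpty ends up as `fusermount -o nonempty <dir>`. A small sketch of the mapping (mirroring mkMountOpts for illustration, not calling the real one):

package main

import (
	"fmt"

	"github.com/hanwen/go-fuse/fuse"
)

// nonEmptyMountOpts mirrors mkMountOpts above, illustration only.
func nonEmptyMountOpts(nonEmpty bool) *fuse.MountOptions {
	var o []string
	if nonEmpty {
		o = append(o, "nonempty")
	}
	return &fuse.MountOptions{Options: o}
}

func main() {
	// [nonempty] is what fusermount receives as -o nonempty.
	fmt.Println(nonEmptyMountOpts(true).Options)
}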


@ -9,7 +9,7 @@ import (
"github.com/sirupsen/logrus"
)
func Serve(hostport string, dir string) {
func Serve(hostport string, dir string, opts *WsFsOpts) {
closer := make(chan bool)
signalChan := make(chan os.Signal, 1)
@ -25,7 +25,7 @@ func Serve(hostport string, dir string) {
req := make(chan RequestCallback, 5)
go func(dir string, closer chan bool) {
err := serveFuse(dir, req, closer)
err := serveFuse(dir, req, *opts, closer)
if err != nil {
logrus.Fatalf("Error serving FUSE: %s", err)
}


@ -11,5 +11,6 @@ Logan Hanks <logan@bitcasa.com>
Nick Cooper <gh@smoogle.org>
Patrick Crosby <pcrosby@gmail.com>
Paul Jolly <paul@myitcv.org.uk>
Shayan Pooya <shayan@arista.com>
Valient Gough <vgough@pobox.com>
Yongwoo Park <nnnlife@gmail.com>


@ -92,7 +92,18 @@ type RawFileSystem interface {
// If called, provide debug output through the log package.
SetDebug(debug bool)
// Lookup is called by the kernel when the VFS wants to know
// about a file inside a directory. Many lookup calls can
// occur in parallel, but only one call happens for each (dir,
// name) pair.
Lookup(header *InHeader, name string, out *EntryOut) (status Status)
// Forget is called when the kernel discards entries from its
// dentry cache. This happens on unmount, and when the kernel
// is short on memory. Since it is not guaranteed to occur at
// any moment, and since there is no return value, Forget
// should not do I/O, as there is no channel to report back
// I/O errors.
Forget(nodeid, nlookup uint64)
// Attributes.


@ -24,10 +24,13 @@ type DirEntry struct {
// Name is the basename of the file in the directory.
Name string
// Ino is the inode number.
Ino uint64
}
func (d DirEntry) String() string {
return fmt.Sprintf("%o: %q", d.Mode, d.Name)
return fmt.Sprintf("%o: %q ino=%d", d.Mode, d.Name, d.Ino)
}
// DirEntryList holds the return value for READDIR and READDIRPLUS
@ -51,12 +54,15 @@ func NewDirEntryList(data []byte, off uint64) *DirEntryList {
// AddDirEntry tries to add an entry, and reports whether it
// succeeded.
func (l *DirEntryList) AddDirEntry(e DirEntry) (bool, uint64) {
return l.Add(0, e.Name, uint64(FUSE_UNKNOWN_INO), e.Mode)
return l.Add(0, e.Name, e.Ino, e.Mode)
}
// Add adds a direntry to the DirEntryList, returning whether it
// succeeded.
func (l *DirEntryList) Add(prefix int, name string, inode uint64, mode uint32) (bool, uint64) {
if inode == 0 {
inode = FUSE_UNKNOWN_INO
}
padding := (8 - len(name)&7) & 7
delta := padding + direntSize + len(name) + prefix
oldLen := len(l.buf)
@ -90,7 +96,7 @@ func (l *DirEntryList) Add(prefix int, name string, inode uint64, mode uint32) (
func (l *DirEntryList) AddDirLookupEntry(e DirEntry) (*EntryOut, uint64) {
lastStart := len(l.buf)
ok, off := l.Add(int(unsafe.Sizeof(EntryOut{})), e.Name,
uint64(FUSE_UNKNOWN_INO), e.Mode)
e.Ino, e.Mode)
if !ok {
return nil, off
}
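
The padding expression keeps every dirent record 8-byte aligned, which the kernel requires when parsing the READDIR buffer. A standalone check of the arithmetic (direntSize is omitted here; only the name-padding part is exercised):

package main

import "fmt"

func main() {
	for _, name := range []string{"a", "abcdefgh", "abcdefghi"} {
		padding := (8 - len(name)&7) & 7 // same expression as in Add
		fmt.Printf("len=%d padding=%d aligned=%v\n",
			len(name), padding, (len(name)+padding)%8 == 0)
	}
}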


@ -20,9 +20,21 @@ func openFUSEDevice() (*os.File, error) {
return nil, err
}
if len(fs) == 0 {
// TODO(hanwen): run the load_osxfuse command.
return nil, fmt.Errorf("no FUSE devices found")
bin := oldLoadBin
if _, err := os.Stat(newLoadBin); err == nil {
bin = newLoadBin
}
cmd := exec.Command(bin)
if err := cmd.Run(); err != nil {
return nil, err
}
fs, err = filepath.Glob("/dev/osxfuse*")
if err != nil {
return nil, err
}
}
for _, fn := range fs {
f, err := os.OpenFile(fn, os.O_RDWR, 0)
if err != nil {
@ -34,6 +46,9 @@ func openFUSEDevice() (*os.File, error) {
return nil, fmt.Errorf("all FUSE devices busy")
}
const oldLoadBin = "/Library/Filesystems/osxfusefs.fs/Support/load_osxfusefs"
const newLoadBin = "/Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse"
const oldMountBin = "/Library/Filesystems/osxfusefs.fs/Support/mount_osxfusefs"
const newMountBin = "/Library/Filesystems/osxfuse.fs/Contents/Resources/mount_osxfuse"


@ -10,7 +10,6 @@ import (
"os"
"os/exec"
"path"
"path/filepath"
"strings"
"syscall"
"unsafe"
@ -71,33 +70,18 @@ func mount(mountPoint string, opts *MountOptions, ready chan<- error) (fd int, e
return -1, err
}
// golang sets CLOEXEC on file descriptors when they are
// acquired through normal operations (e.g. open).
// But for fd, we have to set CLOEXEC manually
syscall.CloseOnExec(fd)
close(ready)
return fd, err
}
func privilegedUnmount(mountPoint string) error {
dir, _ := filepath.Split(mountPoint)
bin, err := umountBinary()
if err != nil {
return err
}
proc, err := os.StartProcess(bin,
[]string{bin, mountPoint},
&os.ProcAttr{Dir: dir, Files: []*os.File{nil, nil, os.Stderr}})
if err != nil {
return err
}
w, err := proc.Wait()
if !w.Success() {
return fmt.Errorf("umount exited with code %v\n", w.Sys())
}
return err
}
func unmount(mountPoint string) (err error) {
if os.Geteuid() == 0 {
return privilegedUnmount(mountPoint)
return syscall.Unmount(mountPoint, 0)
}
bin, err := fusermountBinary()
if err != nil {


@ -35,9 +35,9 @@ type handleMap interface {
}
type handled struct {
check uint32
handle uint64
count int
handle uint64
generation uint64
count int
}
func (h *handled) verify() {
@ -76,29 +76,29 @@ func newPortableHandleMap() *portableHandleMap {
func (m *portableHandleMap) Register(obj *handled) (handle, generation uint64) {
m.Lock()
if obj.count == 0 {
if obj.check != 0 {
panic(_ALREADY_MSG)
}
if len(m.freeIds) == 0 {
handle = uint64(len(m.handles))
m.handles = append(m.handles, obj)
} else {
handle = m.freeIds[len(m.freeIds)-1]
m.freeIds = m.freeIds[:len(m.freeIds)-1]
m.generation++
m.handles[handle] = obj
}
m.used++
obj.handle = handle
} else {
handle = obj.handle
defer m.Unlock()
// Reuse existing handle
if obj.count != 0 {
obj.count++
return obj.handle, obj.generation
}
// Create a new handle number or recycle one from the free list
if len(m.freeIds) == 0 {
obj.handle = uint64(len(m.handles))
m.handles = append(m.handles, obj)
} else {
obj.handle = m.freeIds[len(m.freeIds)-1]
m.freeIds = m.freeIds[:len(m.freeIds)-1]
m.handles[obj.handle] = obj
}
// Increment generation number to guarantee the (handle, generation) tuple
// is unique
m.generation++
m.used++
obj.generation = m.generation
obj.count++
generation = m.generation
m.Unlock()
return
return obj.handle, obj.generation
}
func (m *portableHandleMap) Handle(obj *handled) (h uint64) {
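
The rewrite makes the invariant explicit: the generation counter is bumped on every fresh registration, so even when a handle number is recycled from the free list, the kernel never sees the same (handle, generation) pair for two different objects. A toy illustration of that scheme (not the real portableHandleMap, which also keeps a check word and the free list):

package main

import "fmt"

type entry struct{ handle, generation uint64 }

func main() {
	var gen uint64
	register := func(h uint64) entry {
		gen++ // unique tuple even when h is reused
		return entry{handle: h, generation: gen}
	}
	first := register(7)  // handle 7 in use
	reused := register(7) // handle 7 recycled after a Forget
	fmt.Println(first, reused, first != reused) // {7 1} {7 2} true
}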


@ -24,10 +24,6 @@ type parentData struct {
type Inode struct {
handled handled
// Generation number of the inode. Each (re)use of an inode
// should have a unique generation number.
generation uint64
// Number of open files and its protection.
openFilesMutex sync.Mutex
openFiles []*openedFile
@ -94,6 +90,8 @@ func (n *Inode) Parent() (parent *Inode, name string) {
if n.mountPoint != nil {
return nil, ""
}
n.mount.treeLock.RLock()
defer n.mount.treeLock.RUnlock()
for k := range n.parents {
return k.parent, k.name
}


@ -90,7 +90,8 @@ func doInit(server *Server, req *request) {
}
server.reqMu.Unlock()
out := &InitOut{
out := (*InitOut)(req.outData())
*out = InitOut{
Major: _FUSE_KERNEL_VERSION,
Minor: _OUR_MINOR_VERSION,
MaxReadAhead: input.MaxReadAhead,
@ -99,6 +100,7 @@ func doInit(server *Server, req *request) {
CongestionThreshold: uint16(server.opts.MaxBackground * 3 / 4),
MaxBackground: uint16(server.opts.MaxBackground),
}
if server.opts.MaxReadAhead != 0 && uint32(server.opts.MaxReadAhead) < out.MaxReadAhead {
out.MaxReadAhead = uint32(server.opts.MaxReadAhead)
}
@ -114,12 +116,11 @@ func doInit(server *Server, req *request) {
req.handler = &tweaked
}
req.outData = unsafe.Pointer(out)
req.status = OK
}
func doOpen(server *Server, req *request) {
out := (*OpenOut)(req.outData)
out := (*OpenOut)(req.outData())
status := server.fileSystem.Open((*OpenIn)(req.inData), out)
req.status = status
if status != OK {
@ -128,7 +129,7 @@ func doOpen(server *Server, req *request) {
}
func doCreate(server *Server, req *request) {
out := (*CreateOut)(req.outData)
out := (*CreateOut)(req.outData())
status := server.fileSystem.Create((*CreateIn)(req.inData), req.filenames[0], out)
req.status = status
}
@ -154,19 +155,19 @@ func doReadDirPlus(server *Server, req *request) {
}
func doOpenDir(server *Server, req *request) {
out := (*OpenOut)(req.outData)
out := (*OpenOut)(req.outData())
status := server.fileSystem.OpenDir((*OpenIn)(req.inData), out)
req.status = status
}
func doSetattr(server *Server, req *request) {
out := (*AttrOut)(req.outData)
out := (*AttrOut)(req.outData())
req.status = server.fileSystem.SetAttr((*SetAttrIn)(req.inData), out)
}
func doWrite(server *Server, req *request) {
n, status := server.fileSystem.Write((*WriteIn)(req.inData), req.arg)
o := (*WriteOut)(req.outData)
o := (*WriteOut)(req.outData())
o.Size = n
req.status = status
}
@ -193,7 +194,7 @@ func doGetXAttr(server *Server, req *request) {
input := (*GetXAttrIn)(req.inData)
if input.Size == 0 {
out := (*GetXAttrOut)(req.outData)
out := (*GetXAttrOut)(req.outData())
switch req.inHeader.Opcode {
case _OP_GETXATTR:
// TODO(hanwen): double check this. For getxattr, input.Size
@ -214,8 +215,6 @@ func doGetXAttr(server *Server, req *request) {
return
}
}
req.outData = nil
var data []byte
switch req.inHeader.Opcode {
case _OP_GETXATTR:
@ -239,7 +238,7 @@ func doGetXAttr(server *Server, req *request) {
}
func doGetAttr(server *Server, req *request) {
out := (*AttrOut)(req.outData)
out := (*AttrOut)(req.outData())
s := server.fileSystem.GetAttr((*GetAttrIn)(req.inData), out)
req.status = s
}
@ -272,6 +271,9 @@ func doBatchForget(server *Server, req *request) {
if server.opts.Debug {
log.Printf("doBatchForget: forgetting %d of %d: NodeId: %d, Nlookup: %d", i+1, len(forgets), f.NodeId, f.Nlookup)
}
if f.NodeId == pollHackInode {
continue
}
server.fileSystem.Forget(f.NodeId, f.Nlookup)
}
}
@ -281,20 +283,19 @@ func doReadlink(server *Server, req *request) {
}
func doLookup(server *Server, req *request) {
out := (*EntryOut)(req.outData)
out := (*EntryOut)(req.outData())
s := server.fileSystem.Lookup(req.inHeader, req.filenames[0], out)
req.status = s
req.outData = unsafe.Pointer(out)
}
func doMknod(server *Server, req *request) {
out := (*EntryOut)(req.outData)
out := (*EntryOut)(req.outData())
req.status = server.fileSystem.Mknod((*MknodIn)(req.inData), req.filenames[0], out)
}
func doMkdir(server *Server, req *request) {
out := (*EntryOut)(req.outData)
out := (*EntryOut)(req.outData())
req.status = server.fileSystem.Mkdir((*MkdirIn)(req.inData), req.filenames[0], out)
}
@ -307,7 +308,7 @@ func doRmdir(server *Server, req *request) {
}
func doLink(server *Server, req *request) {
out := (*EntryOut)(req.outData)
out := (*EntryOut)(req.outData())
req.status = server.fileSystem.Link((*LinkIn)(req.inData), req.filenames[0], out)
}
@ -358,7 +359,7 @@ func doAccess(server *Server, req *request) {
}
func doSymlink(server *Server, req *request) {
out := (*EntryOut)(req.outData)
out := (*EntryOut)(req.outData())
req.status = server.fileSystem.Symlink(req.inHeader, req.filenames[1], req.filenames[0], out)
}
@ -367,7 +368,7 @@ func doRename(server *Server, req *request) {
}
func doStatFs(server *Server, req *request) {
out := (*StatfsOut)(req.outData)
out := (*StatfsOut)(req.outData())
req.status = server.fileSystem.StatFs(req.inHeader, out)
if req.status == ENOSYS && runtime.GOOS == "darwin" {
// OSX FUSE requires Statfs to be implemented for the
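
The recurring edit in this file is the same everywhere: instead of allocating an InitOut/EntryOut/OpenOut on the heap and stashing a pointer in the request, each handler casts directly into the request's preallocated output buffer, just past the out-header. A minimal sketch of that pattern, with stand-in struct layouts (the real OutHeader and EntryOut in go-fuse differ):

package main

import (
	"fmt"
	"unsafe"
)

// Stand-in layouts for illustration.
type outHeader struct {
	length uint32
	status int32
	unique uint64
}

type entryOut struct {
	nodeId     uint64
	generation uint64
}

func main() {
	const hdr = unsafe.Sizeof(outHeader{})
	buf := make([]byte, hdr+unsafe.Sizeof(entryOut{})) // zeroed, like outBuf
	// The outData() idea: a typed view into the buffer, no extra allocation.
	out := (*entryOut)(unsafe.Pointer(&buf[hdr]))
	out.nodeId = 42
	fmt.Println(buf[hdr : hdr+8]) // nodeId bytes: [42 0 0 0 0 0 0 0] on x86
}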


@ -89,7 +89,7 @@ func (fs *loopbackFileSystem) OpenDir(name string, context *fuse.Context) (strea
for {
infos, err := f.Readdir(want)
for i := range infos {
// workaround forhttps://code.google.com/p/go/issues/detail?id=5960
// workaround for https://code.google.com/p/go/issues/detail?id=5960
if infos[i] == nil {
continue
}
@ -99,6 +99,7 @@ func (fs *loopbackFileSystem) OpenDir(name string, context *fuse.Context) (strea
}
if s := fuse.ToStatT(infos[i]); s != nil {
d.Mode = uint32(s.Mode)
d.Ino = s.Ino
} else {
log.Printf("ReadDir entry %q for %q has no stat info", n, name)
}


@ -276,7 +276,6 @@ func (n *pathInode) GetPath() string {
them, but since this is a hot path, we take some
// effort to avoid allocations.
n.pathFs.pathLock.RLock()
walkUp := n.Inode()
// TODO - guess depth?
@ -299,7 +298,6 @@ func (n *pathInode) GetPath() string {
pathBytes = append(pathBytes, '/')
}
}
n.pathFs.pathLock.RUnlock()
path := string(pathBytes)
if n.pathFs.debug {
@ -585,26 +583,34 @@ func (n *pathInode) GetAttr(out *fuse.Attr, file nodefs.File, context *fuse.Cont
// an open fd.
file = n.Inode().AnyFile()
}
// If we have found an open file, try to fstat it.
if file != nil {
code = file.GetAttr(out)
if code.Ok() {
return code
}
}
// If we don't have an open file, or fstat on it failed due to an internal
// error, stat by path.
if file == nil || code == fuse.ENOSYS || code == fuse.EBADF {
fi, code = n.fs.GetAttr(n.GetPath(), context)
if !code.Ok() {
return code
}
// This is a bug in the filesystem implementation, but let's not
// crash.
if fi == nil {
log.Printf("Bug: fs.GetAttr returned OK with nil data")
return fuse.EINVAL
}
}
if fi != nil {
n.setClientInode(fi.Ino)
}
if fi != nil && !fi.IsDir() && fi.Nlink == 0 {
// Set inode number (unless already set or disabled).
n.setClientInode(fi.Ino)
// Help filesystems that forget to set Nlink.
if !fi.IsDir() && fi.Nlink == 0 {
fi.Nlink = 1
}
if fi != nil {
*out = *fi
}
*out = *fi
return code
}
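
The new control flow is the classic fstat-first pattern: prefer the open handle, which is immune to renames and unlinks, and fall back to a path-based stat when no handle exists or the handle stat fails with ENOSYS/EBADF. The same shape in plain os terms (illustrative only, not the pathfs code):

package main

import (
	"fmt"
	"os"
)

// statPreferHandle fstats via the open file when possible, else stats by path.
func statPreferHandle(f *os.File, path string) (os.FileInfo, error) {
	if f != nil {
		if fi, err := f.Stat(); err == nil {
			return fi, nil // handle-based stat succeeded
		}
	}
	return os.Stat(path) // no handle, or the handle stat failed
}

func main() {
	fi, err := statPreferHandle(nil, ".")
	fmt.Println(fi.IsDir(), err) // true <nil>
}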


@ -46,6 +46,9 @@ func init() {
CAP_ASYNC_DIO: "ASYNC_DIO",
CAP_WRITEBACK_CACHE: "WRITEBACK_CACHE",
CAP_NO_OPEN_SUPPORT: "NO_OPEN_SUPPORT",
CAP_PARALLEL_DIROPS: "CAP_PARALLEL_DIROPS",
CAP_HANDLE_KILLPRIV: "CAP_PARALLEL_DIROPS",
CAP_POSIX_ACL: "CAP_POSIX_ACL",
}
releaseFlagNames = map[int64]string{
RELEASE_FLUSH: "FLUSH",


@ -26,8 +26,7 @@ type request struct {
filenames []string // filename arguments
// Unstructured data, a pointer to the relevant XxxxOut struct.
outData unsafe.Pointer
// Output data.
status Status
flatData []byte
fdData *readResultFd
@ -65,7 +64,6 @@ func (r *request) clear() {
r.inData = nil
r.arg = nil
r.filenames = nil
r.outData = nil
r.status = OK
r.flatData = nil
r.fdData = nil
@ -96,8 +94,8 @@ func (r *request) InputDebug() string {
func (r *request) OutputDebug() string {
var dataStr string
if r.handler.DecodeOut != nil && r.outData != nil {
dataStr = Print(r.handler.DecodeOut(r.outData))
if r.handler.DecodeOut != nil && r.handler.OutputSize > 0 {
dataStr = Print(r.handler.DecodeOut(r.outData()))
}
max := 1024
@ -115,7 +113,7 @@ func (r *request) OutputDebug() string {
if r.fdData != nil {
spl = " (fd data)"
}
flatStr = fmt.Sprintf(" %d bytes data%s\n", r.flatDataSize(), spl)
flatStr = fmt.Sprintf(" %d bytes data%s", r.flatDataSize(), spl)
}
}
@ -186,26 +184,35 @@ func (r *request) parse() {
copy(r.outBuf[:r.handler.OutputSize+sizeOfOutHeader],
zeroOutBuf[:r.handler.OutputSize+sizeOfOutHeader])
r.outData = unsafe.Pointer(&r.outBuf[sizeOfOutHeader])
}
func (r *request) serializeHeader(dataSize int) (header []byte) {
func (r *request) outData() unsafe.Pointer {
return unsafe.Pointer(&r.outBuf[sizeOfOutHeader])
}
// serializeHeader serializes the response header. The header points
// to an internal buffer of the receiver.
func (r *request) serializeHeader(flatDataSize int) (header []byte) {
dataLength := r.handler.OutputSize
if r.outData == nil || r.status > OK {
if r.status > OK {
dataLength = 0
}
sizeOfOutHeader := unsafe.Sizeof(OutHeader{})
// [GET|LIST]XATTR is two opcodes in one: get/list xattr size (return
// structured GetXAttrOut, no flat data) and get/list xattr data
// (return no structured data, but only flat data)
if r.inHeader.Opcode == _OP_GETXATTR || r.inHeader.Opcode == _OP_LISTXATTR {
if (*GetXAttrIn)(r.inData).Size != 0 {
dataLength = 0
}
}
header = r.outBuf[:sizeOfOutHeader+dataLength]
o := (*OutHeader)(unsafe.Pointer(&header[0]))
o.Unique = r.inHeader.Unique
o.Status = int32(-r.status)
o.Length = uint32(
int(sizeOfOutHeader) + int(dataLength) + dataSize)
var asSlice []byte
toSlice(&asSlice, r.outData, dataLength)
copy(header[sizeOfOutHeader:], asSlice)
int(sizeOfOutHeader) + int(dataLength) + flatDataSize)
return header
}
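
serializeHeader no longer copies outData into the reply, since the payload already lives in outBuf; it only has to size the slice and fill in the header, where Length counts the header plus the fixed out-struct plus any flat data. The arithmetic, spelled out with illustrative sizes:

package main

import "fmt"

// replyLen mirrors the Length computation: header + fixed out-struct + flat data.
// A failed request carries no structured payload.
func replyLen(header, fixedOut, flat int, ok bool) int {
	if !ok {
		fixedOut = 0
	}
	return header + fixedOut + flat
}

func main() {
	fmt.Println(replyLen(16, 128, 0, true))  // e.g. a LOOKUP reply: 144
	fmt.Println(replyLen(16, 0, 4096, true)) // e.g. xattr data: 4112
	fmt.Println(replyLen(16, 128, 0, false)) // error reply: just the header, 16
}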


@ -14,7 +14,6 @@ import (
"sync"
"syscall"
"time"
"unsafe"
)
const (
@ -161,6 +160,7 @@ func NewServer(fs RawFileSystem, mountPoint string, opts *MountOptions) (*Server
// FUSE device: on unmount, sometime some reads do not
// error-out, meaning that unmount will hang.
singleReader: runtime.GOOS == "darwin",
ready: make(chan error, 1),
}
ms.reqPool.New = func() interface{} { return new(request) }
ms.readPool.New = func() interface{} { return make([]byte, o.MaxWrite+pageSize) }
@ -173,7 +173,6 @@ func NewServer(fs RawFileSystem, mountPoint string, opts *MountOptions) (*Server
}
mountPoint = filepath.Clean(filepath.Join(cwd, mountPoint))
}
ms.ready = make(chan error, 1)
fd, err := mount(mountPoint, &o, ms.ready)
if err != nil {
return nil, err
@ -389,12 +388,19 @@ func (ms *Server) handleRequest(req *request) Status {
log.Println(req.InputDebug())
}
if req.status.Ok() && req.handler.Func == nil {
if req.inHeader.NodeId == pollHackInode {
// We want to avoid switching off features through our
// poll hack, so don't use ENOSYS
req.status = EIO
if req.inHeader.Opcode == _OP_POLL {
req.status = ENOSYS
}
} else if req.inHeader.NodeId == FUSE_ROOT_ID && len(req.filenames) > 0 && req.filenames[0] == pollHackName {
doPollHackLookup(ms, req)
} else if req.status.Ok() && req.handler.Func == nil {
log.Printf("Unimplemented opcode %v", operationName(req.inHeader.Opcode))
req.status = ENOSYS
}
if req.status.Ok() {
} else if req.status.Ok() {
req.handler.Func(ms, req)
}
@ -445,11 +451,6 @@ func (ms *Server) InodeNotify(node uint64, off int64, length int64) Status {
return ENOSYS
}
entry := &NotifyInvalInodeOut{
Ino: node,
Off: off,
Length: length,
}
req := request{
inHeader: &InHeader{
Opcode: _OP_NOTIFY_INODE,
@ -457,7 +458,11 @@ func (ms *Server) InodeNotify(node uint64, off int64, length int64) Status {
handler: operationHandlers[_OP_NOTIFY_INODE],
status: NOTIFY_INVAL_INODE,
}
req.outData = unsafe.Pointer(entry)
entry := (*NotifyInvalInodeOut)(req.outData())
entry.Ino = node
entry.Off = off
entry.Length = length
// Protect against concurrent close.
ms.writeMu.Lock()
@ -487,18 +492,17 @@ func (ms *Server) DeleteNotify(parent uint64, child uint64, name string) Status
handler: operationHandlers[_OP_NOTIFY_DELETE],
status: NOTIFY_INVAL_DELETE,
}
entry := &NotifyInvalDeleteOut{
Parent: parent,
Child: child,
NameLen: uint32(len(name)),
}
entry := (*NotifyInvalDeleteOut)(req.outData())
entry.Parent = parent
entry.Child = child
entry.NameLen = uint32(len(name))
// Many versions of FUSE generate stacktraces if the
// terminating null byte is missing.
nameBytes := make([]byte, len(name)+1)
copy(nameBytes, name)
nameBytes[len(nameBytes)-1] = '\000'
req.outData = unsafe.Pointer(entry)
req.flatData = nameBytes
// Protect against concurrent close.
@ -526,17 +530,15 @@ func (ms *Server) EntryNotify(parent uint64, name string) Status {
handler: operationHandlers[_OP_NOTIFY_ENTRY],
status: NOTIFY_INVAL_ENTRY,
}
entry := &NotifyInvalEntryOut{
Parent: parent,
NameLen: uint32(len(name)),
}
entry := (*NotifyInvalEntryOut)(req.outData())
entry.Parent = parent
entry.NameLen = uint32(len(name))
// Many versions of FUSE generate stacktraces if the
// terminating null byte is missing.
nameBytes := make([]byte, len(name)+1)
copy(nameBytes, name)
nameBytes[len(nameBytes)-1] = '\000'
req.outData = unsafe.Pointer(entry)
req.flatData = nameBytes
// Protect against concurrent close.
@ -579,8 +581,10 @@ func init() {
// WaitMount waits for the first request to be served. Use this to
// avoid racing between accessing the (empty or not yet mounted)
// mountpoint, and the OS trying to set up the user-space mount.
// Currently, this call is only necessary on OSX.
func (ms *Server) WaitMount() error {
err := <-ms.ready
return err
if err != nil {
return err
}
return pollHack(ms.mountPoint)
}


@ -194,6 +194,9 @@ const (
CAP_ASYNC_DIO = (1 << 15)
CAP_WRITEBACK_CACHE = (1 << 16)
CAP_NO_OPEN_SUPPORT = (1 << 17)
CAP_PARALLEL_DIROPS = (1 << 18)
CAP_HANDLE_KILLPRIV = (1 << 19)
CAP_POSIX_ACL = (1 << 20)
)
type InitIn struct {


@ -6,11 +6,11 @@ package splice
import (
"fmt"
"os"
"syscall"
)
type Pair struct {
r, w *os.File
r, w int
size int
}
@ -30,7 +30,7 @@ func (p *Pair) Grow(n int) error {
return fmt.Errorf("splice: want %d bytes, max pipe size %d", n, maxPipeSize)
}
newsize, errNo := fcntl(p.r.Fd(), F_SETPIPE_SZ, n)
newsize, errNo := fcntl(uintptr(p.r), F_SETPIPE_SZ, n)
if errNo != 0 {
return fmt.Errorf("splice: fcntl returned %v", errNo)
}
@ -43,8 +43,8 @@ func (p *Pair) Cap() int {
}
func (p *Pair) Close() error {
err1 := p.r.Close()
err2 := p.w.Close()
err1 := syscall.Close(p.r)
err2 := syscall.Close(p.w)
if err1 != nil {
return err1
}
@ -52,17 +52,17 @@ func (p *Pair) Close() error {
}
func (p *Pair) Read(d []byte) (n int, err error) {
return p.r.Read(d)
}
func (p *Pair) ReadFd() uintptr {
return p.r.Fd()
}
func (p *Pair) WriteFd() uintptr {
return p.w.Fd()
return syscall.Read(p.r, d)
}
func (p *Pair) Write(d []byte) (n int, err error) {
return p.w.Write(d)
return syscall.Write(p.w, d)
}
func (p *Pair) ReadFd() uintptr {
return uintptr(p.r)
}
func (p *Pair) WriteFd() uintptr {
return uintptr(p.w)
}
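
Pair now holds raw pipe fds instead of *os.File, dropping the runtime's finalizers and poller integration in favor of direct syscalls on the hot path. A self-contained, Linux-only sketch in the same raw-fd style (mirroring what osPipe below sets up):

package main

import (
	"fmt"
	"syscall"
)

func main() {
	var fds [2]int
	if err := syscall.Pipe2(fds[:], syscall.O_NONBLOCK); err != nil {
		panic(err)
	}
	r, w := fds[0], fds[1]
	defer syscall.Close(r)
	defer syscall.Close(w)

	if _, err := syscall.Write(w, []byte("hi")); err != nil {
		panic(err)
	}
	buf := make([]byte, 2)
	n, _ := syscall.Read(r, buf)
	fmt.Println(string(buf[:n])) // hi
}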


@ -11,7 +11,7 @@ import (
)
func (p *Pair) LoadFromAt(fd uintptr, sz int, off int64) (int, error) {
n, err := syscall.Splice(int(fd), &off, int(p.w.Fd()), nil, sz, 0)
n, err := syscall.Splice(int(fd), &off, p.w, nil, sz, 0)
return int(n), err
}
@ -21,7 +21,7 @@ func (p *Pair) LoadFrom(fd uintptr, sz int) (int, error) {
sz, p.size)
}
n, err := syscall.Splice(int(fd), nil, int(p.w.Fd()), nil, sz, 0)
n, err := syscall.Splice(int(fd), nil, p.w, nil, sz, 0)
if err != nil {
err = os.NewSyscallError("Splice load from", err)
}
@ -29,9 +29,20 @@ func (p *Pair) LoadFrom(fd uintptr, sz int) (int, error) {
}
func (p *Pair) WriteTo(fd uintptr, n int) (int, error) {
m, err := syscall.Splice(int(p.r.Fd()), nil, int(fd), nil, int(n), 0)
m, err := syscall.Splice(p.r, nil, int(fd), nil, int(n), 0)
if err != nil {
err = os.NewSyscallError("Splice write", err)
}
return int(m), err
}
const _SPLICE_F_NONBLOCK = 0x2
func (p *Pair) discard() {
_, err := syscall.Splice(p.r, nil, int(devNullFD), nil, int(p.size), _SPLICE_F_NONBLOCK)
if err == syscall.EAGAIN {
// all good.
} else if err != nil {
panic(err)
}
}
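
discard drains whatever is left in a recycled pipe by splicing it into /dev/null, so no stale bytes survive and nothing is copied through userspace; EAGAIN just means the nonblocking pipe was already empty. A hedged standalone version of the same move:

package main

import (
	"fmt"
	"syscall"
)

const spliceFNonblock = 0x2 // SPLICE_F_NONBLOCK, same value the patch defines

func main() {
	var fds [2]int
	if err := syscall.Pipe2(fds[:], syscall.O_NONBLOCK); err != nil {
		panic(err)
	}
	syscall.Write(fds[1], []byte("stale data"))

	devNull, err := syscall.Open("/dev/null", syscall.O_WRONLY, 0)
	if err != nil {
		panic(err)
	}
	defer syscall.Close(devNull)

	// The kernel moves the buffered bytes straight to /dev/null.
	n, err := syscall.Splice(fds[0], nil, devNull, nil, 64*1024, spliceFNonblock)
	if err != nil && err != syscall.EAGAIN {
		panic(err)
	}
	fmt.Println("drained bytes:", n) // drained bytes: 10
}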


@ -5,7 +5,6 @@
package splice
import (
"io"
"sync"
)
@ -33,7 +32,7 @@ func Used() int {
return splicePool.used()
}
// Return pipe pair to pool
// Done returns the pipe pair to the pool.
func Done(p *Pair) {
splicePool.done(p)
}
@ -47,71 +46,59 @@ func newSplicePairPool() *pairPool {
return &pairPool{}
}
func (me *pairPool) clear() {
me.Lock()
for _, p := range me.unused {
func (pp *pairPool) clear() {
pp.Lock()
for _, p := range pp.unused {
p.Close()
}
me.unused = me.unused[:0]
me.Unlock()
pp.unused = pp.unused[:0]
pp.Unlock()
}
func (me *pairPool) used() (n int) {
me.Lock()
n = me.usedCount
me.Unlock()
func (pp *pairPool) used() (n int) {
pp.Lock()
n = pp.usedCount
pp.Unlock()
return n
}
func (me *pairPool) total() int {
me.Lock()
n := me.usedCount + len(me.unused)
me.Unlock()
func (pp *pairPool) total() int {
pp.Lock()
n := pp.usedCount + len(pp.unused)
pp.Unlock()
return n
}
func (me *pairPool) drop(p *Pair) {
func (pp *pairPool) drop(p *Pair) {
p.Close()
me.Lock()
me.usedCount--
me.Unlock()
pp.Lock()
pp.usedCount--
pp.Unlock()
}
func (me *pairPool) get() (p *Pair, err error) {
me.Lock()
defer me.Unlock()
func (pp *pairPool) get() (p *Pair, err error) {
pp.Lock()
defer pp.Unlock()
me.usedCount++
l := len(me.unused)
pp.usedCount++
l := len(pp.unused)
if l > 0 {
p := me.unused[l-1]
me.unused = me.unused[:l-1]
p := pp.unused[l-1]
pp.unused = pp.unused[:l-1]
return p, nil
}
return newSplicePair()
}
var discardBuffer [32 * 1024]byte
func (pp *pairPool) done(p *Pair) {
p.discard()
func DiscardAll(r io.Reader) {
buf := discardBuffer[:]
for {
n, _ := r.Read(buf)
if n < len(buf) {
break
}
}
}
func (me *pairPool) done(p *Pair) {
DiscardAll(p.r)
me.Lock()
me.usedCount--
me.unused = append(me.unused, p)
me.Unlock()
pp.Lock()
pp.usedCount--
pp.unused = append(pp.unused, p)
pp.Unlock()
}
func init() {


@ -30,6 +30,9 @@ func MaxPipeSize() int {
// Since Linux 2.6.11, the pipe capacity is 65536 bytes.
const DefaultPipeSize = 16 * 4096
// We empty pipes by splicing to /dev/null.
var devNullFD uintptr
func init() {
content, err := ioutil.ReadFile("/proc/sys/fs/pipe-max-size")
if err != nil {
@ -48,6 +51,13 @@ func init() {
resizable = resizable && (errNo == 0)
r.Close()
w.Close()
fd, err := syscall.Open("/dev/null", os.O_WRONLY, 0)
if err != nil {
log.Panicf("splice: %v", err)
}
devNullFD = uintptr(fd)
}
// copy & paste from syscall.
@ -61,30 +71,27 @@ func fcntl(fd uintptr, cmd int, arg int) (val int, errno syscall.Errno) {
const F_SETPIPE_SZ = 1031
const F_GETPIPE_SZ = 1032
func osPipe() (int, int, error) {
var fds [2]int
err := syscall.Pipe2(fds[:], syscall.O_NONBLOCK)
return fds[0], fds[1], err
}
func newSplicePair() (p *Pair, err error) {
p = &Pair{}
p.r, p.w, err = os.Pipe()
p.r, p.w, err = osPipe()
if err != nil {
return nil, err
}
errNo := syscall.Errno(0)
for _, f := range []*os.File{p.r, p.w} {
_, errNo = fcntl(f.Fd(), syscall.F_SETFL, syscall.O_NONBLOCK)
if errNo != 0 {
p.Close()
return nil, os.NewSyscallError("fcntl setfl", errNo)
}
}
p.size, errNo = fcntl(p.r.Fd(), F_GETPIPE_SZ, 0)
if errNo == syscall.EINVAL {
var errNo syscall.Errno
p.size, errNo = fcntl(uintptr(p.r), F_GETPIPE_SZ, 0)
if errNo == syscall.EINVAL {
p.size = DefaultPipeSize
return p, nil
}
if errNo != 0 {
p.Close()
return nil, os.NewSyscallError("fcntl getsize", errNo)
return nil, fmt.Errorf("fcntl getsize: %v", errNo)
}
return p, nil
}