Mirror of https://github.com/kemko/nomad.git (synced 2026-01-07 10:55:42 +03:00)

Merge pull request #1404 from hashicorp/f-streaming
Implement a streaming API and tail in the fs command

api/fs.go (100)
@@ -11,6 +11,14 @@ import (
    "time"
)

const (
    // OriginStart and OriginEnd are the available parameters for the origin
    // argument when streaming a file. They respectively offset from the start
    // and end of a file.
    OriginStart = "start"
    OriginEnd   = "end"
)

// AllocFileInfo holds information about a file inside the AllocDir
type AllocFileInfo struct {
    Name string
@@ -20,6 +28,19 @@ type AllocFileInfo struct {
    ModTime time.Time
}

// StreamFrame is used to frame data of a file when streaming
type StreamFrame struct {
    Offset    int64  `json:",omitempty"`
    Data      []byte `json:",omitempty"`
    File      string `json:",omitempty"`
    FileEvent string `json:",omitempty"`
}

// IsHeartbeat returns if the frame is a heartbeat frame
func (s *StreamFrame) IsHeartbeat() bool {
    return len(s.Data) == 0 && s.FileEvent == "" && s.File == "" && s.Offset == 0
}

// AllocFS is used to introspect an allocation directory on a Nomad client
type AllocFS struct {
    client *Client
@@ -107,7 +128,7 @@ func (a *AllocFS) Stat(alloc *Allocation, path string, q *QueryOptions) (*AllocF
}

// ReadAt is used to read bytes at a given offset until limit at the given path
// in an allocation directory
// in an allocation directory. If limit is <= 0, there is no limit.
func (a *AllocFS) ReadAt(alloc *Allocation, path string, offset int64, limit int64, q *QueryOptions) (io.Reader, *QueryMeta, error) {
    node, _, err := a.client.Nodes().Info(alloc.NodeID, &QueryOptions{})
    if err != nil {
@@ -177,3 +198,80 @@ func (a *AllocFS) getErrorMsg(resp *http.Response) error {
        return err
    }
}

// Stream streams the content of a file blocking on EOF.
// The parameters are:
// * path: path to file to stream.
// * offset: The offset to start streaming data at.
// * origin: Either "start" or "end" and defines from where the offset is applied.
// * cancel: A channel which when closed will stop streaming.
//
// The return value is a channel that will emit StreamFrames as they are read.
func (a *AllocFS) Stream(alloc *Allocation, path, origin string, offset int64,
    cancel <-chan struct{}, q *QueryOptions) (<-chan *StreamFrame, *QueryMeta, error) {

    node, _, err := a.client.Nodes().Info(alloc.NodeID, q)
    if err != nil {
        return nil, nil, err
    }

    if node.HTTPAddr == "" {
        return nil, nil, fmt.Errorf("http addr of the node where alloc %q is running is not advertised", alloc.ID)
    }
    u := &url.URL{
        Scheme: "http",
        Host:   node.HTTPAddr,
        Path:   fmt.Sprintf("/v1/client/fs/stream/%s", alloc.ID),
    }
    v := url.Values{}
    v.Set("path", path)
    v.Set("origin", origin)
    v.Set("offset", strconv.FormatInt(offset, 10))
    u.RawQuery = v.Encode()
    req := &http.Request{
        Method: "GET",
        URL:    u,
        Cancel: cancel,
    }
    c := http.Client{}
    resp, err := c.Do(req)
    if err != nil {
        return nil, nil, err
    }

    // Create the output channel
    frames := make(chan *StreamFrame, 10)

    go func() {
        // Close the body
        defer resp.Body.Close()

        // Create a decoder
        dec := json.NewDecoder(resp.Body)

        for {
            // Check if we have been cancelled
            select {
            case <-cancel:
                return
            default:
            }

            // Decode the next frame
            var frame StreamFrame
            if err := dec.Decode(&frame); err != nil {
                close(frames)
                return
            }

            // Discard heartbeat frames
            if frame.IsHeartbeat() {
                continue
            }

            frames <- &frame
        }
    }()

    return frames, nil, nil
}
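The new Stream helper returns a receive-only channel of frames. A minimal consumer sketch (not part of this commit) is shown below; it assumes an already-configured *api.Client and a resolved *api.Allocation, and the log file path is hypothetical.

package example

import (
    "log"
    "os"

    "github.com/hashicorp/nomad/api"
)

// tailAllocFile follows a file in an allocation directory until cancel is closed.
func tailAllocFile(client *api.Client, alloc *api.Allocation, cancel <-chan struct{}) error {
    frames, _, err := client.AllocFS().Stream(alloc, "alloc/logs/app.log", api.OriginEnd, 0, cancel, nil)
    if err != nil {
        return err
    }
    // Stream already drops heartbeat frames, so every frame received here
    // carries data and/or a file event.
    for frame := range frames {
        if frame.FileEvent != "" {
            log.Printf("file event: %s", frame.FileEvent)
        }
        os.Stdout.Write(frame.Data)
    }
    // The channel is closed when the connection ends or decoding fails.
    return nil
}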
@@ -6,10 +6,14 @@ import (
    "io/ioutil"
    "os"
    "path/filepath"
    "runtime"
    "time"

    "gopkg.in/tomb.v1"

    "github.com/hashicorp/go-multierror"
    "github.com/hashicorp/nomad/nomad/structs"
    "github.com/hpcloud/tail/watch"
)

var (
@@ -56,7 +60,9 @@ type AllocFileInfo struct {
type AllocDirFS interface {
    List(path string) ([]*AllocFileInfo, error)
    Stat(path string) (*AllocFileInfo, error)
    ReadAt(path string, offset int64, limit int64) (io.ReadCloser, error)
    ReadAt(path string, offset int64) (io.ReadCloser, error)
    BlockUntilExists(path string, t *tomb.Tomb) error
    ChangeEvents(path string, curOffset int64, t *tomb.Tomb) (*watch.FileChanges, error)
}

func NewAllocDir(allocDir string) *AllocDir {
@@ -322,9 +328,8 @@ func (d *AllocDir) Stat(path string) (*AllocFileInfo, error) {
    }, nil
}

// ReadAt returns a reader for a file at the path relative to the alloc dir
// which will read a chunk of bytes at a particular offset
func (d *AllocDir) ReadAt(path string, offset int64, limit int64) (io.ReadCloser, error) {
// ReadAt returns a reader for a file at the path relative to the alloc dir
func (d *AllocDir) ReadAt(path string, offset int64) (io.ReadCloser, error) {
    p := filepath.Join(d.AllocDir, path)
    f, err := os.Open(p)
    if err != nil {
@@ -333,14 +338,36 @@ func (d *AllocDir) ReadAt(path string, offset int64, limit int64) (io.ReadCloser
    if _, err := f.Seek(offset, 0); err != nil {
        return nil, fmt.Errorf("can't seek to offset %q: %v", offset, err)
    }
    return &ReadCloserWrapper{Reader: io.LimitReader(f, limit), Closer: f}, nil
    return f, nil
}

// ReadCloserWrapper wraps a LimitReader so that a file is closed once it has been
// read
type ReadCloserWrapper struct {
    io.Reader
    io.Closer
// BlockUntilExists blocks until the passed file relative the allocation
// directory exists. The block can be cancelled with the passed tomb.
func (d *AllocDir) BlockUntilExists(path string, t *tomb.Tomb) error {
    // Get the path relative to the alloc directory
    p := filepath.Join(d.AllocDir, path)
    watcher := getFileWatcher(p)
    return watcher.BlockUntilExists(t)
}

// ChangeEvents watches for changes to the passed path relative to the
// allocation directory. The offset should be the last read offset. The tomb is
// used to clean up the watch.
func (d *AllocDir) ChangeEvents(path string, curOffset int64, t *tomb.Tomb) (*watch.FileChanges, error) {
    // Get the path relative to the alloc directory
    p := filepath.Join(d.AllocDir, path)
    watcher := getFileWatcher(p)
    return watcher.ChangeEvents(t, curOffset)
}

// getFileWatcher returns a FileWatcher for the given path.
func getFileWatcher(path string) watch.FileWatcher {
    if runtime.GOOS == "windows" {
        // There are some deadlock issues with the inotify implementation on
        // windows. Use polling watcher for now.
        return watch.NewPollingFileWatcher(path)
    }
    return watch.NewInotifyFileWatcher(path)
}

func fileCopy(src, dst string, perm os.FileMode) error {
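With the limit parameter dropped from AllocDir.ReadAt, a caller that still needs a bounded read wraps the returned ReadCloser itself (the agent's FileReadAtRequest later in this commit does exactly this). A hedged sketch with a hypothetical helper name, using only "io", "io/ioutil" and the allocdir package:

// readLimited is a hypothetical helper illustrating the new calling
// convention: the limit now lives with the caller, built from io.LimitReader.
func readLimited(d *allocdir.AllocDir, path string, offset, limit int64) ([]byte, error) {
    rc, err := d.ReadAt(path, offset)
    if err != nil {
        return nil, err
    }
    defer rc.Close()

    var r io.Reader = rc
    if limit > 0 {
        r = io.LimitReader(rc, limit)
    }
    return ioutil.ReadAll(r)
}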
@@ -1,17 +1,44 @@
package agent

import (
    "bytes"
    "fmt"
    "io"
    "net/http"
    "strconv"
    "strings"
    "sync"
    "time"

    "gopkg.in/tomb.v1"

    "github.com/docker/docker/pkg/ioutils"
    "github.com/hashicorp/nomad/client/allocdir"
    "github.com/hpcloud/tail/watch"
    "github.com/ugorji/go/codec"
)

var (
    allocIDNotPresentErr  = fmt.Errorf("must provide a valid alloc id")
    fileNameNotPresentErr = fmt.Errorf("must provide a file name")
    clientNotRunning      = fmt.Errorf("node is not running a Nomad Client")
    invalidOrigin         = fmt.Errorf("origin must be start or end")
)

const (
    // streamFrameSize is the maximum number of bytes to send in a single frame
    streamFrameSize = 64 * 1024

    // streamHeartbeatRate is the rate at which a heartbeat will occur to detect
    // a closed connection without sending any additional data
    streamHeartbeatRate = 10 * time.Second

    // streamBatchWindow is the window in which file content is batched before
    // being flushed if the frame size has not been hit.
    streamBatchWindow = 200 * time.Millisecond

    deleteEvent   = "file deleted"
    truncateEvent = "file truncated"
)

func (s *HTTPServer) FsRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
@@ -29,6 +56,8 @@ func (s *HTTPServer) FsRequest(resp http.ResponseWriter, req *http.Request) (int
        return s.FileReadAtRequest(resp, req)
    case strings.HasPrefix(path, "cat/"):
        return s.FileCatRequest(resp, req)
    case strings.HasPrefix(path, "stream/"):
        return s.Stream(resp, req)
    default:
        return nil, CodedError(404, ErrInvalidMethod)
    }
@@ -82,19 +111,40 @@ func (s *HTTPServer) FileReadAtRequest(resp http.ResponseWriter, req *http.Reque
    if offset, err = strconv.ParseInt(q.Get("offset"), 10, 64); err != nil {
        return nil, fmt.Errorf("error parsing offset: %v", err)
    }
    if limit, err = strconv.ParseInt(q.Get("limit"), 10, 64); err != nil {
        return nil, fmt.Errorf("error parsing limit: %v", err)

    // Parse the limit
    if limitStr := q.Get("limit"); limitStr != "" {
        if limit, err = strconv.ParseInt(limitStr, 10, 64); err != nil {
            return nil, fmt.Errorf("error parsing limit: %v", err)
        }
    }

    fs, err := s.agent.client.GetAllocFS(allocID)
    if err != nil {
        return nil, err
    }
    r, err := fs.ReadAt(path, offset, limit)

    rc, err := fs.ReadAt(path, offset)
    if limit > 0 {
        rc = &ReadCloserWrapper{
            Reader: io.LimitReader(rc, limit),
            Closer: rc,
        }
    }

    if err != nil {
        return nil, err
    }
    io.Copy(resp, r)
    return nil, nil

    io.Copy(resp, rc)
    return nil, rc.Close()
}

// ReadCloserWrapper wraps a LimitReader so that a file is closed once it has been
// read
type ReadCloserWrapper struct {
    io.Reader
    io.Closer
}

func (s *HTTPServer) FileCatRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
@@ -122,10 +172,433 @@ func (s *HTTPServer) FileCatRequest(resp http.ResponseWriter, req *http.Request)
        return nil, fmt.Errorf("file %q is a directory", path)
    }

    r, err := fs.ReadAt(path, int64(0), fileInfo.Size)
    r, err := fs.ReadAt(path, int64(0))
    if err != nil {
        return nil, err
    }
    io.Copy(resp, r)
    return nil, nil
    return nil, r.Close()
}

// StreamFrame is used to frame data of a file when streaming
type StreamFrame struct {
    // Offset is the offset the data was read from
    Offset int64 `json:",omitempty"`

    // Data is the read data
    Data []byte `json:",omitempty"`

    // File is the file that the data was read from
    File string `json:",omitempty"`

    // FileEvent is the last file event that occured that could cause the
    // streams position to change or end
    FileEvent string `json:",omitempty"`
}

// IsHeartbeat returns if the frame is a heartbeat frame
func (s *StreamFrame) IsHeartbeat() bool {
    return s.Offset == 0 && len(s.Data) == 0 && s.File == "" && s.FileEvent == ""
}
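Because every StreamFrame field is tagged omitempty, a heartbeat encodes as an empty JSON object on the wire while data frames carry only the populated fields. The standalone illustration below uses the standard library encoder; the agent itself encodes with ugorji/go codec's JSON handle, but the omitempty behaviour is the same, and the local struct merely mirrors StreamFrame for the example.

package main

import (
    "encoding/json"
    "fmt"
)

// frame mirrors the StreamFrame fields above purely for illustration.
type frame struct {
    Offset    int64  `json:",omitempty"`
    Data      []byte `json:",omitempty"`
    File      string `json:",omitempty"`
    FileEvent string `json:",omitempty"`
}

func main() {
    hb, _ := json.Marshal(frame{})                                   // heartbeat frame
    df, _ := json.Marshal(frame{Offset: 12, File: "alloc/logs/app"}) // data frame
    fmt.Println(string(hb)) // {}
    fmt.Println(string(df)) // {"Offset":12,"File":"alloc/logs/app"}
}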
// StreamFramer is used to buffer and send frames as well as heartbeat.
type StreamFramer struct {
    out        io.WriteCloser
    enc        *codec.Encoder
    frameSize  int
    heartbeat  *time.Ticker
    flusher    *time.Ticker
    shutdownCh chan struct{}
    exitCh     chan struct{}

    outbound chan *StreamFrame

    // The mutex protects everything below
    l sync.Mutex

    // The current working frame
    f    *StreamFrame
    data *bytes.Buffer

    // Captures whether the framer is running and any error that occured to
    // cause it to stop.
    running bool
    err     error
}

// NewStreamFramer creates a new stream framer that will output StreamFrames to
// the passed output.
func NewStreamFramer(out io.WriteCloser, heartbeatRate, batchWindow time.Duration, frameSize int) *StreamFramer {
    // Create a JSON encoder
    enc := codec.NewEncoder(out, jsonHandle)

    // Create the heartbeat and flush ticker
    heartbeat := time.NewTicker(heartbeatRate)
    flusher := time.NewTicker(batchWindow)

    return &StreamFramer{
        out:        out,
        enc:        enc,
        frameSize:  frameSize,
        heartbeat:  heartbeat,
        flusher:    flusher,
        outbound:   make(chan *StreamFrame),
        data:       bytes.NewBuffer(make([]byte, 0, 2*frameSize)),
        shutdownCh: make(chan struct{}),
        exitCh:     make(chan struct{}),
    }
}

// Destroy is used to cleanup the StreamFramer and flush any pending frames
func (s *StreamFramer) Destroy() {
    s.l.Lock()
    wasRunning := s.running
    s.running = false
    s.f = nil
    close(s.shutdownCh)
    s.heartbeat.Stop()
    s.flusher.Stop()
    s.l.Unlock()

    // Ensure things were flushed
    if wasRunning {
        <-s.exitCh
    }
}

// Run starts a long lived goroutine that handles sending data as well as
// heartbeating
func (s *StreamFramer) Run() {
    s.l.Lock()
    if s.running {
        return
    }

    s.running = true
    s.l.Unlock()

    go s.run()
}

// ExitCh returns a channel that will be closed when the run loop terminates.
func (s *StreamFramer) ExitCh() <-chan struct{} {
    return s.exitCh
}

// run is the internal run method. It exits if Destroy is called or an error
// occurs, in which case the exit channel is closed.
func (s *StreamFramer) run() {
    // Store any error and mark it as not running
    var err error
    defer func() {
        s.l.Lock()
        s.err = err
        s.out.Close()
        close(s.exitCh)
        close(s.outbound)
        s.l.Unlock()
    }()

    // Start a heartbeat/flusher go-routine. This is done seprately to avoid blocking
    // the outbound channel.
    go func() {
        for {
            select {
            case <-s.shutdownCh:
                return
            case <-s.flusher.C:
                // Skip if there is nothing to flush
                s.l.Lock()
                if s.f == nil {
                    s.l.Unlock()
                    continue
                }

                // Read the data for the frame, and send it
                s.f.Data = s.readData()
                s.outbound <- s.f
                s.f = nil

                s.l.Unlock()
            case <-s.heartbeat.C:
                // Send a heartbeat frame
                s.outbound <- &StreamFrame{}
            }
        }
    }()

OUTER:
    for {
        select {
        case <-s.shutdownCh:
            break OUTER
        case o := <-s.outbound:
            // Send the frame and then clear the current working frame
            if err = s.enc.Encode(o); err != nil {
                return
            }
        }
    }

    // Flush any existing frames
    s.l.Lock()
    defer s.l.Unlock()
    select {
    case o := <-s.outbound:
        // Send the frame and then clear the current working frame
        if err = s.enc.Encode(o); err != nil {
            return
        }
    default:
    }

    if s.f != nil {
        s.f.Data = s.readData()
        s.enc.Encode(s.f)
    }
}

// readData is a helper which reads the buffered data returning up to the frame
// size of data. Must be called with the lock held. The returned value is
// invalid on the next read or write into the StreamFramer buffer
func (s *StreamFramer) readData() []byte {
    // Compute the amount to read from the buffer
    size := s.data.Len()
    if size > s.frameSize {
        size = s.frameSize
    }
    if size == 0 {
        return nil
    }
    return s.data.Next(size)
}

// Send creates and sends a StreamFrame based on the passed parameters. An error
// is returned if the run routine hasn't run or encountered an error. Send is
// asyncronous and does not block for the data to be transferred.
func (s *StreamFramer) Send(file, fileEvent string, data []byte, offset int64) error {
    s.l.Lock()
    defer s.l.Unlock()

    // If we are not running, return the error that caused us to not run or
    // indicated that it was never started.
    if !s.running {
        if s.err != nil {
            return s.err
        }
        return fmt.Errorf("StreamFramer not running")
    }

    // Check if not mergeable
    if s.f != nil && (s.f.File != file || s.f.FileEvent != fileEvent) {
        // Flush the old frame
        s.outbound <- &StreamFrame{
            Offset:    s.f.Offset,
            File:      s.f.File,
            FileEvent: s.f.FileEvent,
            Data:      s.readData(),
        }
        s.f = nil
    }

    // Store the new data as the current frame.
    if s.f == nil {
        s.f = &StreamFrame{
            Offset:    offset,
            File:      file,
            FileEvent: fileEvent,
        }
    }

    // Write the data to the buffer
    s.data.Write(data)

    // Handle the delete case in which there is no data
    if s.data.Len() == 0 && s.f.FileEvent != "" {
        s.outbound <- &StreamFrame{
            Offset:    s.f.Offset,
            File:      s.f.File,
            FileEvent: s.f.FileEvent,
        }
    }

    // Flush till we are under the max frame size
    for s.data.Len() >= s.frameSize {
        // Create a new frame to send it
        s.outbound <- &StreamFrame{
            Offset:    s.f.Offset,
            File:      s.f.File,
            FileEvent: s.f.FileEvent,
            Data:      s.readData(),
        }
    }

    if s.data.Len() == 0 {
        s.f = nil
    }

    return nil
}
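A minimal sketch of driving the framer outside the HTTP handler, mirroring what this commit's tests do: frames written into one end of a pipe are decoded from the other end with the same jsonHandle. It is not part of the commit and would have to live in package agent, where StreamFramer and jsonHandle are visible; the rates, frame size and file name are arbitrary.

// streamFramerExample is a sketch, not part of this commit.
func streamFramerExample() error {
    r, w := io.Pipe()
    sf := NewStreamFramer(w, 10*time.Second, 200*time.Millisecond, 64*1024)
    sf.Run()
    defer sf.Destroy()

    done := make(chan struct{})
    go func() {
        defer close(done)
        dec := codec.NewDecoder(r, jsonHandle)
        for {
            var frame StreamFrame
            if err := dec.Decode(&frame); err != nil {
                return // pipe closed by Destroy
            }
            if frame.IsHeartbeat() {
                continue
            }
            fmt.Printf("offset=%d bytes=%d\n", frame.Offset, len(frame.Data))
        }
    }()

    // Send only buffers the bytes; they are flushed once the frame size is
    // reached or the batch window fires.
    if err := sf.Send("alloc/logs/app.log", "", []byte("hello"), 5); err != nil {
        return err
    }
    time.Sleep(500 * time.Millisecond)
    return nil
}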
// Stream streams the content of a file blocking on EOF.
// The parameters are:
// * path: path to file to stream.
// * offset: The offset to start streaming data at, defaults to zero.
// * origin: Either "start" or "end" and defines from where the offset is
//   applied. Defaults to "start".
func (s *HTTPServer) Stream(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
    var allocID, path string
    var err error

    q := req.URL.Query()

    if allocID = strings.TrimPrefix(req.URL.Path, "/v1/client/fs/stream/"); allocID == "" {
        return nil, allocIDNotPresentErr
    }

    if path = q.Get("path"); path == "" {
        return nil, fileNameNotPresentErr
    }

    var offset int64
    offsetString := q.Get("offset")
    if offsetString != "" {
        var err error
        if offset, err = strconv.ParseInt(offsetString, 10, 64); err != nil {
            return nil, fmt.Errorf("error parsing offset: %v", err)
        }
    }

    origin := q.Get("origin")
    switch origin {
    case "start", "end":
    case "":
        origin = "start"
    default:
        return nil, invalidOrigin
    }

    fs, err := s.agent.client.GetAllocFS(allocID)
    if err != nil {
        return nil, err
    }

    fileInfo, err := fs.Stat(path)
    if err != nil {
        return nil, err
    }
    if fileInfo.IsDir {
        return nil, fmt.Errorf("file %q is a directory", path)
    }

    // If offsetting from the end subtract from the size
    if origin == "end" {
        offset = fileInfo.Size - offset

    }

    // Create an output that gets flushed on every write
    output := ioutils.NewWriteFlusher(resp)

    return nil, s.stream(offset, path, fs, output)
}

func (s *HTTPServer) stream(offset int64, path string, fs allocdir.AllocDirFS, output io.WriteCloser) error {
    // Get the reader
    f, err := fs.ReadAt(path, offset)
    if err != nil {
        return err
    }
    defer f.Close()

    // Create a tomb to cancel watch events
    t := tomb.Tomb{}
    defer func() {
        t.Kill(nil)
        t.Done()
    }()

    // Create the framer
    framer := NewStreamFramer(output, streamHeartbeatRate, streamBatchWindow, streamFrameSize)
    framer.Run()
    defer framer.Destroy()

    // Create a variable to allow setting the last event
    var lastEvent string

    // Only create the file change watcher once. But we need to do it after we
    // read and reach EOF.
    var changes *watch.FileChanges

    // Start streaming the data
    data := make([]byte, streamFrameSize)
OUTER:
    for {
        // Read up to the max frame size
        n, readErr := f.Read(data)

        // Update the offset
        offset += int64(n)

        // Return non-EOF errors
        if readErr != nil && readErr != io.EOF {
            return readErr
        }

        // Send the frame
        if n != 0 {
            if err := framer.Send(path, lastEvent, data[:n], offset); err != nil {
                return err
            }
        }

        // Clear the last event
        if lastEvent != "" {
            lastEvent = ""
        }

        // Just keep reading
        if readErr == nil {
            continue
        }

        // If EOF is hit, wait for a change to the file
        if changes == nil {
            changes, err = fs.ChangeEvents(path, offset, &t)
            if err != nil {
                return err
            }
        }

        for {
            select {
            case <-changes.Modified:
                continue OUTER
            case <-changes.Deleted:
                return framer.Send(path, deleteEvent, nil, offset)
            case <-changes.Truncated:
                // Close the current reader
                if err := f.Close(); err != nil {
                    return err
                }

                // Get a new reader at offset zero
                offset = 0
                var err error
                f, err = fs.ReadAt(path, offset)
                if err != nil {
                    return err
                }
                defer f.Close()

                // Store the last event
                lastEvent = truncateEvent
                continue OUTER
            case <-framer.ExitCh():
                return nil
            }
        }
    }

    return nil
}
@@ -1,9 +1,19 @@
|
||||
package agent
|
||||
|
||||
import (
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/nomad/client/allocdir"
|
||||
"github.com/hashicorp/nomad/testutil"
|
||||
"github.com/ugorji/go/codec"
|
||||
)
|
||||
|
||||
func TestAllocDirFS_List_MissingParams(t *testing.T) {
|
||||
@@ -84,3 +94,532 @@ func TestAllocDirFS_ReadAt_MissingParams(t *testing.T) {
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
type WriteCloseChecker struct {
|
||||
io.WriteCloser
|
||||
Closed bool
|
||||
}
|
||||
|
||||
func (w *WriteCloseChecker) Close() error {
|
||||
w.Closed = true
|
||||
return w.WriteCloser.Close()
|
||||
}
|
||||
|
||||
// This test checks, that even if the frame size has not been hit, a flush will
|
||||
// periodically occur.
|
||||
func TestStreamFramer_Flush(t *testing.T) {
|
||||
// Create the stream framer
|
||||
r, w := io.Pipe()
|
||||
wrappedW := &WriteCloseChecker{WriteCloser: w}
|
||||
hRate, bWindow := 100*time.Millisecond, 100*time.Millisecond
|
||||
sf := NewStreamFramer(wrappedW, hRate, bWindow, 100)
|
||||
sf.Run()
|
||||
|
||||
// Create a decoder
|
||||
dec := codec.NewDecoder(r, jsonHandle)
|
||||
|
||||
f := "foo"
|
||||
fe := "bar"
|
||||
d := []byte{0xa}
|
||||
o := int64(10)
|
||||
|
||||
// Start the reader
|
||||
resultCh := make(chan struct{})
|
||||
go func() {
|
||||
for {
|
||||
var frame StreamFrame
|
||||
if err := dec.Decode(&frame); err != nil {
|
||||
t.Fatalf("failed to decode")
|
||||
}
|
||||
|
||||
if frame.IsHeartbeat() {
|
||||
continue
|
||||
}
|
||||
|
||||
if reflect.DeepEqual(frame.Data, d) && frame.Offset == o && frame.File == f && frame.FileEvent == fe {
|
||||
resultCh <- struct{}{}
|
||||
return
|
||||
}
|
||||
|
||||
}
|
||||
}()
|
||||
|
||||
// Write only 1 byte so we do not hit the frame size
|
||||
if err := sf.Send(f, fe, d, o); err != nil {
|
||||
t.Fatalf("Send() failed %v", err)
|
||||
}
|
||||
|
||||
select {
|
||||
case <-resultCh:
|
||||
case <-time.After(2 * bWindow):
|
||||
t.Fatalf("failed to flush")
|
||||
}
|
||||
|
||||
// Close the reader and wait. This should cause the runner to exit
|
||||
if err := r.Close(); err != nil {
|
||||
t.Fatalf("failed to close reader")
|
||||
}
|
||||
|
||||
select {
|
||||
case <-sf.ExitCh():
|
||||
case <-time.After(2 * hRate):
|
||||
t.Fatalf("exit channel should close")
|
||||
}
|
||||
|
||||
sf.Destroy()
|
||||
if !wrappedW.Closed {
|
||||
t.Fatalf("writer not closed")
|
||||
}
|
||||
}
|
||||
|
||||
// This test checks that frames will be batched till the frame size is hit (in
|
||||
// the case that is before the flush).
|
||||
func TestStreamFramer_Batch(t *testing.T) {
|
||||
// Create the stream framer
|
||||
r, w := io.Pipe()
|
||||
wrappedW := &WriteCloseChecker{WriteCloser: w}
|
||||
// Ensure the batch window doesn't get hit
|
||||
hRate, bWindow := 100*time.Millisecond, 500*time.Millisecond
|
||||
sf := NewStreamFramer(wrappedW, hRate, bWindow, 3)
|
||||
sf.Run()
|
||||
|
||||
// Create a decoder
|
||||
dec := codec.NewDecoder(r, jsonHandle)
|
||||
|
||||
f := "foo"
|
||||
fe := "bar"
|
||||
d := []byte{0xa, 0xb, 0xc}
|
||||
o := int64(10)
|
||||
|
||||
// Start the reader
|
||||
resultCh := make(chan struct{})
|
||||
go func() {
|
||||
for {
|
||||
var frame StreamFrame
|
||||
if err := dec.Decode(&frame); err != nil {
|
||||
t.Fatalf("failed to decode")
|
||||
}
|
||||
|
||||
if frame.IsHeartbeat() {
|
||||
continue
|
||||
}
|
||||
|
||||
if reflect.DeepEqual(frame.Data, d) && frame.Offset == o && frame.File == f && frame.FileEvent == fe {
|
||||
resultCh <- struct{}{}
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// Write only 1 byte so we do not hit the frame size
|
||||
if err := sf.Send(f, fe, d[:1], o); err != nil {
|
||||
t.Fatalf("Send() failed %v", err)
|
||||
}
|
||||
|
||||
// Ensure we didn't get any data
|
||||
select {
|
||||
case <-resultCh:
|
||||
t.Fatalf("Got data before frame size reached")
|
||||
case <-time.After(bWindow / 2):
|
||||
}
|
||||
|
||||
// Write the rest so we hit the frame size
|
||||
if err := sf.Send(f, fe, d[1:], o); err != nil {
|
||||
t.Fatalf("Send() failed %v", err)
|
||||
}
|
||||
|
||||
// Ensure we get data
|
||||
select {
|
||||
case <-resultCh:
|
||||
case <-time.After(2 * bWindow):
|
||||
t.Fatalf("Did not receive data after batch size reached")
|
||||
}
|
||||
|
||||
// Close the reader and wait. This should cause the runner to exit
|
||||
if err := r.Close(); err != nil {
|
||||
t.Fatalf("failed to close reader")
|
||||
}
|
||||
|
||||
select {
|
||||
case <-sf.ExitCh():
|
||||
case <-time.After(2 * hRate):
|
||||
t.Fatalf("exit channel should close")
|
||||
}
|
||||
|
||||
sf.Destroy()
|
||||
if !wrappedW.Closed {
|
||||
t.Fatalf("writer not closed")
|
||||
}
|
||||
}
|
||||
|
||||
func TestStreamFramer_Heartbeat(t *testing.T) {
|
||||
// Create the stream framer
|
||||
r, w := io.Pipe()
|
||||
wrappedW := &WriteCloseChecker{WriteCloser: w}
|
||||
hRate, bWindow := 100*time.Millisecond, 100*time.Millisecond
|
||||
sf := NewStreamFramer(wrappedW, hRate, bWindow, 100)
|
||||
sf.Run()
|
||||
|
||||
// Create a decoder
|
||||
dec := codec.NewDecoder(r, jsonHandle)
|
||||
|
||||
// Start the reader
|
||||
resultCh := make(chan struct{})
|
||||
go func() {
|
||||
for {
|
||||
var frame StreamFrame
|
||||
if err := dec.Decode(&frame); err != nil {
|
||||
t.Fatalf("failed to decode")
|
||||
}
|
||||
|
||||
if frame.IsHeartbeat() {
|
||||
resultCh <- struct{}{}
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-resultCh:
|
||||
case <-time.After(2 * hRate):
|
||||
t.Fatalf("failed to heartbeat")
|
||||
}
|
||||
|
||||
// Close the reader and wait. This should cause the runner to exit
|
||||
if err := r.Close(); err != nil {
|
||||
t.Fatalf("failed to close reader")
|
||||
}
|
||||
|
||||
select {
|
||||
case <-sf.ExitCh():
|
||||
case <-time.After(2 * hRate):
|
||||
t.Fatalf("exit channel should close")
|
||||
}
|
||||
|
||||
sf.Destroy()
|
||||
if !wrappedW.Closed {
|
||||
t.Fatalf("writer not closed")
|
||||
}
|
||||
}
|
||||
|
||||
func TestHTTP_Stream_MissingParams(t *testing.T) {
|
||||
httpTest(t, nil, func(s *TestServer) {
|
||||
req, err := http.NewRequest("GET", "/v1/client/fs/stream/", nil)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
respW := httptest.NewRecorder()
|
||||
|
||||
_, err = s.Server.Stream(respW, req)
|
||||
if err == nil {
|
||||
t.Fatal("expected error")
|
||||
}
|
||||
|
||||
req, err = http.NewRequest("GET", "/v1/client/fs/stream/foo", nil)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
respW = httptest.NewRecorder()
|
||||
|
||||
_, err = s.Server.Stream(respW, req)
|
||||
if err == nil {
|
||||
t.Fatal("expected error")
|
||||
}
|
||||
|
||||
req, err = http.NewRequest("GET", "/v1/client/fs/stream/foo?path=/path/to/file", nil)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
respW = httptest.NewRecorder()
|
||||
|
||||
_, err = s.Server.Stream(respW, req)
|
||||
if err == nil {
|
||||
t.Fatal("expected error")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// tempAllocDir returns a new alloc dir that is rooted in a temp dir. The caller
|
||||
// should destroy the temp dir.
|
||||
func tempAllocDir(t *testing.T) *allocdir.AllocDir {
|
||||
dir, err := ioutil.TempDir("", "")
|
||||
if err != nil {
|
||||
t.Fatalf("TempDir() failed: %v", err)
|
||||
}
|
||||
|
||||
return allocdir.NewAllocDir(dir)
|
||||
}
|
||||
|
||||
type nopWriteCloser struct {
|
||||
io.Writer
|
||||
}
|
||||
|
||||
func (n nopWriteCloser) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestHTTP_Stream_NoFile(t *testing.T) {
|
||||
httpTest(t, nil, func(s *TestServer) {
|
||||
// Get a temp alloc dir
|
||||
ad := tempAllocDir(t)
|
||||
defer os.RemoveAll(ad.AllocDir)
|
||||
|
||||
if err := s.Server.stream(0, "foo", ad, nopWriteCloser{ioutil.Discard}); err == nil {
|
||||
t.Fatalf("expected an error when streaming unknown file")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestHTTP_Stream_Modify(t *testing.T) {
|
||||
httpTest(t, nil, func(s *TestServer) {
|
||||
// Get a temp alloc dir
|
||||
ad := tempAllocDir(t)
|
||||
defer os.RemoveAll(ad.AllocDir)
|
||||
|
||||
// Create a file in the temp dir
|
||||
streamFile := "stream_file"
|
||||
f, err := os.Create(filepath.Join(ad.AllocDir, streamFile))
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create file: %v", err)
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
// Create a decoder
|
||||
r, w := io.Pipe()
|
||||
defer r.Close()
|
||||
defer w.Close()
|
||||
dec := codec.NewDecoder(r, jsonHandle)
|
||||
|
||||
data := []byte("helloworld")
|
||||
|
||||
// Start the reader
|
||||
resultCh := make(chan struct{})
|
||||
go func() {
|
||||
var collected []byte
|
||||
for {
|
||||
var frame StreamFrame
|
||||
if err := dec.Decode(&frame); err != nil {
|
||||
t.Fatalf("failed to decode: %v", err)
|
||||
}
|
||||
|
||||
if frame.IsHeartbeat() {
|
||||
continue
|
||||
}
|
||||
|
||||
collected = append(collected, frame.Data...)
|
||||
if reflect.DeepEqual(data, collected) {
|
||||
resultCh <- struct{}{}
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// Write a few bytes
|
||||
if _, err := f.Write(data[:3]); err != nil {
|
||||
t.Fatalf("write failed: %v", err)
|
||||
}
|
||||
|
||||
// Start streaming
|
||||
go func() {
|
||||
if err := s.Server.stream(0, streamFile, ad, w); err != nil {
|
||||
t.Fatalf("stream() failed: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
// Sleep a little before writing more. This lets us check if the watch
|
||||
// is working.
|
||||
time.Sleep(1 * time.Second)
|
||||
if _, err := f.Write(data[3:]); err != nil {
|
||||
t.Fatalf("write failed: %v", err)
|
||||
}
|
||||
|
||||
select {
|
||||
case <-resultCh:
|
||||
case <-time.After(2 * streamBatchWindow):
|
||||
t.Fatalf("failed to send new data")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestHTTP_Stream_Truncate(t *testing.T) {
|
||||
httpTest(t, nil, func(s *TestServer) {
|
||||
// Get a temp alloc dir
|
||||
ad := tempAllocDir(t)
|
||||
defer os.RemoveAll(ad.AllocDir)
|
||||
|
||||
// Create a file in the temp dir
|
||||
streamFile := "stream_file"
|
||||
streamFilePath := filepath.Join(ad.AllocDir, streamFile)
|
||||
f, err := os.Create(streamFilePath)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create file: %v", err)
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
// Create a decoder
|
||||
r, w := io.Pipe()
|
||||
defer r.Close()
|
||||
defer w.Close()
|
||||
dec := codec.NewDecoder(r, jsonHandle)
|
||||
|
||||
data := []byte("helloworld")
|
||||
|
||||
// Start the reader
|
||||
truncateCh := make(chan struct{})
|
||||
dataPostTruncCh := make(chan struct{})
|
||||
go func() {
|
||||
var collected []byte
|
||||
for {
|
||||
var frame StreamFrame
|
||||
if err := dec.Decode(&frame); err != nil {
|
||||
t.Fatalf("failed to decode: %v", err)
|
||||
}
|
||||
|
||||
if frame.IsHeartbeat() {
|
||||
continue
|
||||
}
|
||||
|
||||
if frame.FileEvent == truncateEvent {
|
||||
close(truncateCh)
|
||||
}
|
||||
|
||||
collected = append(collected, frame.Data...)
|
||||
if reflect.DeepEqual(data, collected) {
|
||||
close(dataPostTruncCh)
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// Write a few bytes
|
||||
if _, err := f.Write(data[:3]); err != nil {
|
||||
t.Fatalf("write failed: %v", err)
|
||||
}
|
||||
|
||||
// Start streaming
|
||||
go func() {
|
||||
if err := s.Server.stream(0, streamFile, ad, w); err != nil {
|
||||
t.Fatalf("stream() failed: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
// Sleep a little before truncating. This lets us check if the watch
|
||||
// is working.
|
||||
time.Sleep(1 * time.Second)
|
||||
if err := f.Truncate(0); err != nil {
|
||||
t.Fatalf("truncate failed: %v", err)
|
||||
}
|
||||
if err := f.Sync(); err != nil {
|
||||
t.Fatalf("sync failed: %v", err)
|
||||
}
|
||||
if err := f.Close(); err != nil {
|
||||
t.Fatalf("failed to close file: %v", err)
|
||||
}
|
||||
|
||||
f2, err := os.OpenFile(streamFilePath, os.O_RDWR, 0)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to reopen file: %v", err)
|
||||
}
|
||||
defer f2.Close()
|
||||
if _, err := f2.Write(data[3:5]); err != nil {
|
||||
t.Fatalf("write failed: %v", err)
|
||||
}
|
||||
|
||||
select {
|
||||
case <-truncateCh:
|
||||
case <-time.After(2 * streamBatchWindow):
|
||||
t.Fatalf("did not receive truncate")
|
||||
}
|
||||
|
||||
// Sleep a little before writing more. This lets us check if the watch
|
||||
// is working.
|
||||
time.Sleep(1 * time.Second)
|
||||
if _, err := f2.Write(data[5:]); err != nil {
|
||||
t.Fatalf("write failed: %v", err)
|
||||
}
|
||||
|
||||
select {
|
||||
case <-dataPostTruncCh:
|
||||
case <-time.After(2 * streamBatchWindow):
|
||||
t.Fatalf("did not receive post truncate data")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestHTTP_Stream_Delete(t *testing.T) {
|
||||
httpTest(t, nil, func(s *TestServer) {
|
||||
// Get a temp alloc dir
|
||||
ad := tempAllocDir(t)
|
||||
defer os.RemoveAll(ad.AllocDir)
|
||||
|
||||
// Create a file in the temp dir
|
||||
streamFile := "stream_file"
|
||||
streamFilePath := filepath.Join(ad.AllocDir, streamFile)
|
||||
f, err := os.Create(streamFilePath)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create file: %v", err)
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
// Create a decoder
|
||||
r, w := io.Pipe()
|
||||
wrappedW := &WriteCloseChecker{WriteCloser: w}
|
||||
defer r.Close()
|
||||
defer w.Close()
|
||||
dec := codec.NewDecoder(r, jsonHandle)
|
||||
|
||||
data := []byte("helloworld")
|
||||
|
||||
// Start the reader
|
||||
deleteCh := make(chan struct{})
|
||||
go func() {
|
||||
for {
|
||||
var frame StreamFrame
|
||||
if err := dec.Decode(&frame); err != nil {
|
||||
t.Fatalf("failed to decode: %v", err)
|
||||
}
|
||||
|
||||
if frame.IsHeartbeat() {
|
||||
continue
|
||||
}
|
||||
|
||||
if frame.FileEvent == deleteEvent {
|
||||
close(deleteCh)
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// Write a few bytes
|
||||
if _, err := f.Write(data[:3]); err != nil {
|
||||
t.Fatalf("write failed: %v", err)
|
||||
}
|
||||
|
||||
// Start streaming
|
||||
go func() {
|
||||
if err := s.Server.stream(0, streamFile, ad, wrappedW); err != nil {
|
||||
t.Fatalf("stream() failed: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
// Sleep a little before deleting. This lets us check if the watch
|
||||
// is working.
|
||||
time.Sleep(1 * time.Second)
|
||||
if err := os.Remove(streamFilePath); err != nil {
|
||||
t.Fatalf("delete failed: %v", err)
|
||||
}
|
||||
|
||||
select {
|
||||
case <-deleteCh:
|
||||
case <-time.After(4 * streamBatchWindow):
|
||||
t.Fatalf("did not receive delete")
|
||||
}
|
||||
|
||||
testutil.WaitForResult(func() (bool, error) {
|
||||
return wrappedW.Closed, nil
|
||||
}, func(err error) {
|
||||
t.Fatalf("connection not closed")
|
||||
})
|
||||
|
||||
})
|
||||
}
|
||||
|
||||
command/fs.go (133)
@@ -5,13 +5,26 @@ import (
    "io"
    "math/rand"
    "os"
    "os/signal"
    "strings"
    "syscall"
    "time"

    humanize "github.com/dustin/go-humanize"
    "github.com/hashicorp/nomad/api"
)

const (
    // bytesToLines is an estimation of how many bytes are in each log line.
    // This is used to set the offset to read from when a user specifies how
    // many lines to tail from.
    bytesToLines int64 = 120

    // defaultTailLines is the number of lines to tail by default if the value
    // is not overriden.
    defaultTailLines int64 = 10
)

type FSCommand struct {
    Meta
}
@@ -42,6 +55,21 @@ FS Specific Options:
  -stat
    Show file stat information instead of displaying the file, or listing the directory.

  -tail
    Show the files contents with offsets relative to the end of the file. If no
    offset is given, -n is defaulted to 10.

  -n
    Sets the tail location in best-efforted number of lines relative to the end
    of the file.

  -c
    Sets the tail location in number of bytes relative to the end of the file.

  -f
    Causes the output to not stop when the end of the file is reached, but
    rather to wait for additional output.

`
    return strings.TrimSpace(helpText)
}
@@ -51,13 +79,19 @@ func (f *FSCommand) Synopsis() string {
}

func (f *FSCommand) Run(args []string) int {
    var verbose, machine, job, stat bool
    var verbose, machine, job, stat, tail, follow bool
    var numLines, numBytes int64

    flags := f.Meta.FlagSet("fs-list", FlagSetClient)
    flags.Usage = func() { f.Ui.Output(f.Help()) }
    flags.BoolVar(&verbose, "verbose", false, "")
    flags.BoolVar(&machine, "H", false, "")
    flags.BoolVar(&job, "job", false, "")
    flags.BoolVar(&stat, "stat", false, "")
    flags.BoolVar(&follow, "f", false, "")
    flags.BoolVar(&tail, "tail", false, "")
    flags.Int64Var(&numLines, "n", -1, "")
    flags.Int64Var(&numBytes, "c", -1, "")

    if err := flags.Parse(args); err != nil {
        return 1
@@ -212,19 +246,112 @@ nomad alloc-status %s`, allocID, allocID)
        )
    }
    f.Ui.Output(formatList(out))
    } else {
        // We have a file, cat it.
        return 0
    }

    // We have a file, output it.
    if !tail {
        r, _, err := client.AllocFS().Cat(alloc, path, nil)
        if err != nil {
            f.Ui.Error(fmt.Sprintf("Error reading file: %s", err))
            return 1
        }
        io.Copy(os.Stdout, r)
    } else {
        // Parse the offset
        var offset int64 = defaultTailLines * bytesToLines

        if nLines, nBytes := numLines != -1, numBytes != -1; nLines && nBytes {
            f.Ui.Error("Both -n and -c set")
            return 1
        } else if nLines {
            offset = numLines * bytesToLines
        } else if nBytes {
            offset = numBytes
        }

        if offset > file.Size {
            offset = file.Size
        }

        var err error
        if follow {
            err = f.followFile(client, alloc, path, offset)
        } else {
            // This offset needs to be relative from the front versus the follow
            // is relative to the end
            offset = file.Size - offset
            r, _, err := client.AllocFS().ReadAt(alloc, path, offset, -1, nil)
            if err != nil {
                f.Ui.Error(fmt.Sprintf("Error reading file: %s", err))
                return 1
            }
            io.Copy(os.Stdout, r)
        }

        if err != nil {
            f.Ui.Error(fmt.Sprintf("Error tailing file: %v", err))
            return 1
        }
    }

    return 0
}

// followFile outputs the contents of the file to stdout relative to the end of
// the file. If numLines and numBytes are both less than zero, the default
// output is defaulted to 10 lines.
func (f *FSCommand) followFile(client *api.Client, alloc *api.Allocation,
    path string, offset int64) error {

    cancel := make(chan struct{})
    frames, _, err := client.AllocFS().Stream(alloc, path, api.OriginEnd, offset, cancel, nil)
    if err != nil {
        return err
    }
    signalCh := make(chan os.Signal, 1)
    signal.Notify(signalCh, os.Interrupt, syscall.SIGTERM)

    var frame *api.StreamFrame
    var ok bool
    for {
        select {
        case <-signalCh:
            // End the streaming
            close(cancel)

            // Output the last offset
            if frame != nil && frame.Offset > 0 {
                f.Ui.Output(fmt.Sprintf("\nLast outputted offset (bytes): %d", frame.Offset))
            }

            return nil
        case frame, ok = <-frames:
            if !ok {
                // Connection has been killed
                return nil
            }

            if frame == nil {
                panic("received nil frame; please report as a bug")
            }

            if frame.IsHeartbeat() {
                continue
            }

            // Print the file event
            if frame.FileEvent != "" {
                f.Ui.Output(fmt.Sprintf("nomad: FileEvent %q", frame.FileEvent))
            }

            fmt.Print(string(frame.Data))
        }
    }

    return nil
}

// Get Random Allocation ID from a known jobID. Prefer to use a running allocation,
// but use a dead allocation if no running allocations are found
func getRandomJobAlloc(client *api.Client, jobID string) (string, error) {
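For concreteness, the tail-offset heuristic in the Run method above works out as follows: -n converts lines to bytes with the bytesToLines estimate (the default is 10 * 120 = 1200 bytes), -c gives bytes directly, and the result is capped at the file size. The sketch below restates that logic in a hypothetical standalone helper; it is not part of the commit.

// tailOffset is a hypothetical helper restating the offset arithmetic above.
func tailOffset(numLines, numBytes, fileSize int64) int64 {
    offset := defaultTailLines * bytesToLines // default: 10 lines * 120 bytes
    if numLines != -1 {
        offset = numLines * bytesToLines
    } else if numBytes != -1 {
        offset = numBytes
    }
    if offset > fileSize {
        offset = fileSize
    }
    return offset
}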
@@ -372,7 +372,7 @@ func (s *Server) reapDupBlockedEvaluations(stopCh chan struct{}) {

// periodicUnblockFailedEvals periodically unblocks failed, blocked evaluations.
func (s *Server) periodicUnblockFailedEvals(stopCh chan struct{}) {
    ticker := time.NewTimer(failedEvalUnblockInterval)
    ticker := time.NewTicker(failedEvalUnblockInterval)
    defer ticker.Stop()
    for {
        select {
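The one-line change above matters because time.NewTimer delivers a single tick while time.NewTicker keeps delivering them at the interval, which is what a periodic unblocker needs. A minimal illustration of the pattern (sketch only; stopCh and the work are placeholders):

// With a Timer the loop below would wake at most once; with a Ticker it wakes
// every interval until stopCh is closed.
ticker := time.NewTicker(time.Minute)
defer ticker.Stop()
for {
    select {
    case <-stopCh:
        return
    case <-ticker.C:
        // do the periodic work
    }
}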
vendor/github.com/hpcloud/tail/LICENSE.txt (generated, vendored, new file, 21)
@@ -0,0 +1,21 @@
|
||||
# The MIT License (MIT)
|
||||
|
||||
# © Copyright 2015 Hewlett Packard Enterprise Development LP
|
||||
Copyright (c) 2014 ActiveState
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
vendor/github.com/hpcloud/tail/util/util.go (generated, vendored, new file, 48)
@@ -0,0 +1,48 @@
|
||||
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
|
||||
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
|
||||
|
||||
package util
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"runtime/debug"
|
||||
)
|
||||
|
||||
type Logger struct {
|
||||
*log.Logger
|
||||
}
|
||||
|
||||
var LOGGER = &Logger{log.New(os.Stderr, "", log.LstdFlags)}
|
||||
|
||||
// fatal is like panic except it displays only the current goroutine's stack.
|
||||
func Fatal(format string, v ...interface{}) {
|
||||
// https://github.com/hpcloud/log/blob/master/log.go#L45
|
||||
LOGGER.Output(2, fmt.Sprintf("FATAL -- "+format, v...)+"\n"+string(debug.Stack()))
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// partitionString partitions the string into chunks of given size,
|
||||
// with the last chunk of variable size.
|
||||
func PartitionString(s string, chunkSize int) []string {
|
||||
if chunkSize <= 0 {
|
||||
panic("invalid chunkSize")
|
||||
}
|
||||
length := len(s)
|
||||
chunks := 1 + length/chunkSize
|
||||
start := 0
|
||||
end := chunkSize
|
||||
parts := make([]string, 0, chunks)
|
||||
for {
|
||||
if end > length {
|
||||
end = length
|
||||
}
|
||||
parts = append(parts, s[start:end])
|
||||
if end == length {
|
||||
break
|
||||
}
|
||||
start, end = end, end+chunkSize
|
||||
}
|
||||
return parts
|
||||
}
|
||||
vendor/github.com/hpcloud/tail/watch/filechanges.go (generated, vendored, new file, 36)
@@ -0,0 +1,36 @@
|
||||
package watch
|
||||
|
||||
type FileChanges struct {
|
||||
Modified chan bool // Channel to get notified of modifications
|
||||
Truncated chan bool // Channel to get notified of truncations
|
||||
Deleted chan bool // Channel to get notified of deletions/renames
|
||||
}
|
||||
|
||||
func NewFileChanges() *FileChanges {
|
||||
return &FileChanges{
|
||||
make(chan bool), make(chan bool), make(chan bool)}
|
||||
}
|
||||
|
||||
func (fc *FileChanges) NotifyModified() {
|
||||
sendOnlyIfEmpty(fc.Modified)
|
||||
}
|
||||
|
||||
func (fc *FileChanges) NotifyTruncated() {
|
||||
sendOnlyIfEmpty(fc.Truncated)
|
||||
}
|
||||
|
||||
func (fc *FileChanges) NotifyDeleted() {
|
||||
sendOnlyIfEmpty(fc.Deleted)
|
||||
}
|
||||
|
||||
// sendOnlyIfEmpty sends on a bool channel only if the channel has no
|
||||
// backlog to be read by other goroutines. This concurrency pattern
|
||||
// can be used to notify other goroutines if and only if they are
|
||||
// looking for it (i.e., subsequent notifications can be compressed
|
||||
// into one).
|
||||
func sendOnlyIfEmpty(ch chan bool) {
|
||||
select {
|
||||
case ch <- true:
|
||||
default:
|
||||
}
|
||||
}
|
||||
vendor/github.com/hpcloud/tail/watch/inotify.go (generated, vendored, new file, 128)
@@ -0,0 +1,128 @@
|
||||
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
|
||||
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
|
||||
|
||||
package watch
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/hpcloud/tail/util"
|
||||
|
||||
"gopkg.in/fsnotify.v1"
|
||||
"gopkg.in/tomb.v1"
|
||||
)
|
||||
|
||||
// InotifyFileWatcher uses inotify to monitor file changes.
|
||||
type InotifyFileWatcher struct {
|
||||
Filename string
|
||||
Size int64
|
||||
}
|
||||
|
||||
func NewInotifyFileWatcher(filename string) *InotifyFileWatcher {
|
||||
fw := &InotifyFileWatcher{filepath.Clean(filename), 0}
|
||||
return fw
|
||||
}
|
||||
|
||||
func (fw *InotifyFileWatcher) BlockUntilExists(t *tomb.Tomb) error {
|
||||
err := WatchCreate(fw.Filename)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer RemoveWatchCreate(fw.Filename)
|
||||
|
||||
// Do a real check now as the file might have been created before
|
||||
// calling `WatchFlags` above.
|
||||
if _, err = os.Stat(fw.Filename); !os.IsNotExist(err) {
|
||||
// file exists, or stat returned an error.
|
||||
return err
|
||||
}
|
||||
|
||||
events := Events(fw.Filename)
|
||||
|
||||
for {
|
||||
select {
|
||||
case evt, ok := <-events:
|
||||
if !ok {
|
||||
return fmt.Errorf("inotify watcher has been closed")
|
||||
}
|
||||
evtName, err := filepath.Abs(evt.Name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fwFilename, err := filepath.Abs(fw.Filename)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if evtName == fwFilename {
|
||||
return nil
|
||||
}
|
||||
case <-t.Dying():
|
||||
return tomb.ErrDying
|
||||
}
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
func (fw *InotifyFileWatcher) ChangeEvents(t *tomb.Tomb, pos int64) (*FileChanges, error) {
|
||||
err := Watch(fw.Filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
changes := NewFileChanges()
|
||||
fw.Size = pos
|
||||
|
||||
go func() {
|
||||
defer RemoveWatch(fw.Filename)
|
||||
|
||||
events := Events(fw.Filename)
|
||||
|
||||
for {
|
||||
prevSize := fw.Size
|
||||
|
||||
var evt fsnotify.Event
|
||||
var ok bool
|
||||
|
||||
select {
|
||||
case evt, ok = <-events:
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
case <-t.Dying():
|
||||
return
|
||||
}
|
||||
|
||||
switch {
|
||||
case evt.Op&fsnotify.Remove == fsnotify.Remove:
|
||||
fallthrough
|
||||
|
||||
case evt.Op&fsnotify.Rename == fsnotify.Rename:
|
||||
changes.NotifyDeleted()
|
||||
return
|
||||
|
||||
case evt.Op&fsnotify.Write == fsnotify.Write:
|
||||
fi, err := os.Stat(fw.Filename)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
changes.NotifyDeleted()
|
||||
return
|
||||
}
|
||||
// XXX: report this error back to the user
|
||||
util.Fatal("Failed to stat file %v: %v", fw.Filename, err)
|
||||
}
|
||||
fw.Size = fi.Size()
|
||||
|
||||
if prevSize > 0 && prevSize > fw.Size {
|
||||
changes.NotifyTruncated()
|
||||
} else {
|
||||
changes.NotifyModified()
|
||||
}
|
||||
prevSize = fw.Size
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return changes, nil
|
||||
}
|
||||
vendor/github.com/hpcloud/tail/watch/inotify_tracker.go (generated, vendored, new file, 260)
@@ -0,0 +1,260 @@
|
||||
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
|
||||
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
|
||||
|
||||
package watch
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"syscall"
|
||||
|
||||
"github.com/hpcloud/tail/util"
|
||||
|
||||
"gopkg.in/fsnotify.v1"
|
||||
)
|
||||
|
||||
type InotifyTracker struct {
|
||||
mux sync.Mutex
|
||||
watcher *fsnotify.Watcher
|
||||
chans map[string]chan fsnotify.Event
|
||||
done map[string]chan bool
|
||||
watchNums map[string]int
|
||||
watch chan *watchInfo
|
||||
remove chan *watchInfo
|
||||
error chan error
|
||||
}
|
||||
|
||||
type watchInfo struct {
|
||||
op fsnotify.Op
|
||||
fname string
|
||||
}
|
||||
|
||||
func (this *watchInfo) isCreate() bool {
|
||||
return this.op == fsnotify.Create
|
||||
}
|
||||
|
||||
var (
|
||||
// globally shared InotifyTracker; ensures only one fsnotify.Watcher is used
|
||||
shared *InotifyTracker
|
||||
|
||||
// these are used to ensure the shared InotifyTracker is run exactly once
|
||||
once = sync.Once{}
|
||||
goRun = func() {
|
||||
shared = &InotifyTracker{
|
||||
mux: sync.Mutex{},
|
||||
chans: make(map[string]chan fsnotify.Event),
|
||||
done: make(map[string]chan bool),
|
||||
watchNums: make(map[string]int),
|
||||
watch: make(chan *watchInfo),
|
||||
remove: make(chan *watchInfo),
|
||||
error: make(chan error),
|
||||
}
|
||||
go shared.run()
|
||||
}
|
||||
|
||||
logger = log.New(os.Stderr, "", log.LstdFlags)
|
||||
)
|
||||
|
||||
// Watch signals the run goroutine to begin watching the input filename
|
||||
func Watch(fname string) error {
|
||||
return watch(&watchInfo{
|
||||
fname: fname,
|
||||
})
|
||||
}
|
||||
|
||||
// Watch create signals the run goroutine to begin watching the input filename
|
||||
// if call the WatchCreate function, don't call the Cleanup, call the RemoveWatchCreate
|
||||
func WatchCreate(fname string) error {
|
||||
return watch(&watchInfo{
|
||||
op: fsnotify.Create,
|
||||
fname: fname,
|
||||
})
|
||||
}
|
||||
|
||||
func watch(winfo *watchInfo) error {
|
||||
// start running the shared InotifyTracker if not already running
|
||||
once.Do(goRun)
|
||||
|
||||
winfo.fname = filepath.Clean(winfo.fname)
|
||||
shared.watch <- winfo
|
||||
return <-shared.error
|
||||
}
|
||||
|
||||
// RemoveWatch signals the run goroutine to remove the watch for the input filename
|
||||
func RemoveWatch(fname string) {
|
||||
remove(&watchInfo{
|
||||
fname: fname,
|
||||
})
|
||||
}
|
||||
|
||||
// RemoveWatch create signals the run goroutine to remove the watch for the input filename
|
||||
func RemoveWatchCreate(fname string) {
|
||||
remove(&watchInfo{
|
||||
op: fsnotify.Create,
|
||||
fname: fname,
|
||||
})
|
||||
}
|
||||
|
||||
func remove(winfo *watchInfo) {
|
||||
// start running the shared InotifyTracker if not already running
|
||||
once.Do(goRun)
|
||||
|
||||
winfo.fname = filepath.Clean(winfo.fname)
|
||||
shared.mux.Lock()
|
||||
done := shared.done[winfo.fname]
|
||||
if done != nil {
|
||||
delete(shared.done, winfo.fname)
|
||||
close(done)
|
||||
}
|
||||
|
||||
fname := winfo.fname
|
||||
if winfo.isCreate() {
|
||||
// Watch for new files to be created in the parent directory.
|
||||
fname = filepath.Dir(fname)
|
||||
}
|
||||
shared.watchNums[fname]--
|
||||
watchNum := shared.watchNums[fname]
|
||||
if watchNum == 0 {
|
||||
delete(shared.watchNums, fname)
|
||||
}
|
||||
shared.mux.Unlock()
|
||||
|
||||
// If we were the last ones to watch this file, unsubscribe from inotify.
|
||||
// This needs to happen after releasing the lock because fsnotify waits
|
||||
// synchronously for the kernel to acknowledge the removal of the watch
|
||||
// for this file, which causes us to deadlock if we still held the lock.
|
||||
if watchNum == 0 {
|
||||
shared.watcher.Remove(fname)
|
||||
}
|
||||
shared.remove <- winfo
|
||||
}
|
||||
|
||||
// Events returns a channel to which FileEvents corresponding to the input filename
|
||||
// will be sent. This channel will be closed when removeWatch is called on this
|
||||
// filename.
|
||||
func Events(fname string) <-chan fsnotify.Event {
|
||||
shared.mux.Lock()
|
||||
defer shared.mux.Unlock()
|
||||
|
||||
return shared.chans[fname]
|
||||
}
|
||||
|
||||
// Cleanup removes the watch for the input filename if necessary.
|
||||
func Cleanup(fname string) {
|
||||
RemoveWatch(fname)
|
||||
}
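The package-level helpers above (Watch, WatchCreate, RemoveWatch, RemoveWatchCreate, Events, Cleanup) are the whole public surface of the shared tracker. As a rough, hypothetical sketch of how a caller drives them (the import path is the vendored package path; the file name is made up for illustration):

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/hpcloud/tail/watch" // assumed import path of this package
)

func main() {
	const fname = "/tmp/app.log" // illustrative file name

	// Watch lazily starts the shared InotifyTracker on first use.
	if err := watch.Watch(fname); err != nil {
		log.Fatal(err)
	}

	// Consume events until the channel is closed by Cleanup below.
	done := make(chan struct{})
	go func() {
		defer close(done)
		for ev := range watch.Events(fname) {
			fmt.Println("event:", ev.Op, ev.Name)
		}
	}()

	time.Sleep(10 * time.Second) // stand-in for real work
	watch.Cleanup(fname)         // closes the Events channel (use RemoveWatchCreate for WatchCreate)
	<-done
}
```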
|
||||
|
||||
// addWatch registers the input filename with the shared fsnotify.Watcher and
// tracks how many callers are interested in it.
|
||||
func (shared *InotifyTracker) addWatch(winfo *watchInfo) error {
|
||||
shared.mux.Lock()
|
||||
defer shared.mux.Unlock()
|
||||
|
||||
if shared.chans[winfo.fname] == nil {
|
||||
shared.chans[winfo.fname] = make(chan fsnotify.Event)
|
||||
shared.done[winfo.fname] = make(chan bool)
|
||||
}
|
||||
|
||||
fname := winfo.fname
|
||||
if winfo.isCreate() {
|
||||
// Watch for new files to be created in the parent directory.
|
||||
fname = filepath.Dir(fname)
|
||||
}
|
||||
|
||||
// already in inotify watch
|
||||
if shared.watchNums[fname] > 0 {
|
||||
shared.watchNums[fname]++
|
||||
if winfo.isCreate() {
|
||||
shared.watchNums[winfo.fname]++
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
err := shared.watcher.Add(fname)
|
||||
if err == nil {
|
||||
shared.watchNums[fname]++
|
||||
if winfo.isCreate() {
|
||||
shared.watchNums[winfo.fname]++
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// removeWatch drops the tracker's bookkeeping for the input filename and closes the
// corresponding events channel.
|
||||
func (shared *InotifyTracker) removeWatch(winfo *watchInfo) {
|
||||
shared.mux.Lock()
|
||||
defer shared.mux.Unlock()
|
||||
|
||||
ch := shared.chans[winfo.fname]
|
||||
if ch == nil {
|
||||
return
|
||||
}
|
||||
|
||||
delete(shared.chans, winfo.fname)
|
||||
close(ch)
|
||||
|
||||
if !winfo.isCreate() {
|
||||
return
|
||||
}
|
||||
|
||||
shared.watchNums[winfo.fname]--
|
||||
if shared.watchNums[winfo.fname] == 0 {
|
||||
delete(shared.watchNums, winfo.fname)
|
||||
}
|
||||
}
|
||||
|
||||
// sendEvent sends the input event to the appropriate Tail.
|
||||
func (shared *InotifyTracker) sendEvent(event fsnotify.Event) {
|
||||
name := filepath.Clean(event.Name)
|
||||
|
||||
shared.mux.Lock()
|
||||
ch := shared.chans[name]
|
||||
done := shared.done[name]
|
||||
shared.mux.Unlock()
|
||||
|
||||
if ch != nil && done != nil {
|
||||
select {
|
||||
case ch <- event:
|
||||
case <-done:
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// run starts the goroutine in which the shared struct reads events from its
|
||||
// Watcher's Event channel and sends the events to the appropriate Tail.
|
||||
func (shared *InotifyTracker) run() {
|
||||
watcher, err := fsnotify.NewWatcher()
|
||||
if err != nil {
|
||||
util.Fatal("failed to create Watcher")
|
||||
}
|
||||
shared.watcher = watcher
|
||||
|
||||
for {
|
||||
select {
|
||||
case winfo := <-shared.watch:
|
||||
shared.error <- shared.addWatch(winfo)
|
||||
|
||||
case winfo := <-shared.remove:
|
||||
shared.removeWatch(winfo)
|
||||
|
||||
case event, open := <-shared.watcher.Events:
|
||||
if !open {
|
||||
return
|
||||
}
|
||||
shared.sendEvent(event)
|
||||
|
||||
case err, open := <-shared.watcher.Errors:
|
||||
if !open {
|
||||
return
|
||||
} else if err != nil {
|
||||
sysErr, ok := err.(*os.SyscallError)
|
||||
if !ok || sysErr.Err != syscall.EINTR {
|
||||
logger.Printf("Error in Watcher Error channel: %s", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
118
vendor/github.com/hpcloud/tail/watch/polling.go
generated
vendored
Normal file
@@ -0,0 +1,118 @@
|
||||
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
|
||||
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
|
||||
|
||||
package watch
|
||||
|
||||
import (
|
||||
"os"
|
||||
"runtime"
|
||||
"time"
|
||||
|
||||
"github.com/hpcloud/tail/util"
|
||||
"gopkg.in/tomb.v1"
|
||||
)
|
||||
|
||||
// PollingFileWatcher polls the file for changes.
|
||||
type PollingFileWatcher struct {
|
||||
Filename string
|
||||
Size int64
|
||||
}
|
||||
|
||||
func NewPollingFileWatcher(filename string) *PollingFileWatcher {
|
||||
fw := &PollingFileWatcher{filename, 0}
|
||||
return fw
|
||||
}
|
||||
|
||||
var POLL_DURATION time.Duration
|
||||
|
||||
func (fw *PollingFileWatcher) BlockUntilExists(t *tomb.Tomb) error {
|
||||
for {
|
||||
if _, err := os.Stat(fw.Filename); err == nil {
|
||||
return nil
|
||||
} else if !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
select {
|
||||
case <-time.After(POLL_DURATION):
|
||||
continue
|
||||
case <-t.Dying():
|
||||
return tomb.ErrDying
|
||||
}
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
func (fw *PollingFileWatcher) ChangeEvents(t *tomb.Tomb, pos int64) (*FileChanges, error) {
|
||||
origFi, err := os.Stat(fw.Filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
changes := NewFileChanges()
|
||||
var prevModTime time.Time
|
||||
|
||||
// XXX: use tomb.Tomb to cleanly manage these goroutines. replace
|
||||
// the fatal (below) with tomb's Kill.
|
||||
|
||||
fw.Size = pos
|
||||
|
||||
go func() {
|
||||
prevSize := fw.Size
|
||||
for {
|
||||
select {
|
||||
case <-t.Dying():
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
time.Sleep(POLL_DURATION)
|
||||
fi, err := os.Stat(fw.Filename)
|
||||
if err != nil {
|
||||
// Windows cannot delete a file if a handle is still open (tail keeps one open)
|
||||
// so it gives access denied to anything trying to read it until all handles are released.
|
||||
if os.IsNotExist(err) || (runtime.GOOS == "windows" && os.IsPermission(err)) {
|
||||
// File does not exist (has been deleted).
|
||||
changes.NotifyDeleted()
|
||||
return
|
||||
}
|
||||
|
||||
// XXX: report this error back to the user
|
||||
util.Fatal("Failed to stat file %v: %v", fw.Filename, err)
|
||||
}
|
||||
|
||||
// File got moved/renamed?
|
||||
if !os.SameFile(origFi, fi) {
|
||||
changes.NotifyDeleted()
|
||||
return
|
||||
}
|
||||
|
||||
// File got truncated?
|
||||
fw.Size = fi.Size()
|
||||
if prevSize > 0 && prevSize > fw.Size {
|
||||
changes.NotifyTruncated()
|
||||
prevSize = fw.Size
|
||||
continue
|
||||
}
|
||||
// File got bigger?
|
||||
if prevSize > 0 && prevSize < fw.Size {
|
||||
changes.NotifyModified()
|
||||
prevSize = fw.Size
|
||||
continue
|
||||
}
|
||||
prevSize = fw.Size
|
||||
|
||||
// File was appended to (changed)?
|
||||
modTime := fi.ModTime()
|
||||
if modTime != prevModTime {
|
||||
prevModTime = modTime
|
||||
changes.NotifyModified()
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return changes, nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
POLL_DURATION = 250 * time.Millisecond
|
||||
}
|
||||
20
vendor/github.com/hpcloud/tail/watch/watch.go
generated
vendored
Normal file
@@ -0,0 +1,20 @@
|
||||
// Copyright (c) 2015 HPE Software Inc. All rights reserved.
|
||||
// Copyright (c) 2013 ActiveState Software Inc. All rights reserved.
|
||||
|
||||
package watch
|
||||
|
||||
import "gopkg.in/tomb.v1"
|
||||
|
||||
// FileWatcher monitors file-level events.
|
||||
type FileWatcher interface {
|
||||
// BlockUntilExists blocks until the file comes into existence.
|
||||
BlockUntilExists(*tomb.Tomb) error
|
||||
|
||||
// ChangeEvents reports on changes to a file, be it modification,
|
||||
// deletion, rename, or truncation. The returned FileChanges' channels are
// closed, and thus become unusable, after a deletion or truncation event.
|
||||
// In order to properly report truncations, ChangeEvents requires
|
||||
// the caller to pass their current offset in the file.
|
||||
ChangeEvents(*tomb.Tomb, int64) (*FileChanges, error)
|
||||
}
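For the interface itself, a minimal consumer sketch follows. It assumes the vendored import paths shown in this change and that FileChanges exposes Modified, Truncated, and Deleted channels (as the NotifyModified/NotifyTruncated/NotifyDeleted calls in polling.go suggest); the package and file names are illustrative.

```go
package follower // illustrative package name

import (
	"log"

	"github.com/hpcloud/tail/watch" // assumed vendored import path
	"gopkg.in/tomb.v1"
)

// follow blocks until name exists, then logs change notifications until the
// file is deleted or truncated, or until the tomb starts dying.
func follow(t *tomb.Tomb, name string) error {
	var fw watch.FileWatcher = watch.NewPollingFileWatcher(name)

	// Wait for the file to come into existence.
	if err := fw.BlockUntilExists(t); err != nil {
		return err
	}

	// Real callers pass their current read offset; 0 means "from the start".
	changes, err := fw.ChangeEvents(t, 0)
	if err != nil {
		return err
	}

	for {
		select {
		case <-changes.Modified:
			log.Println("modified:", name)
		case <-changes.Truncated:
			log.Println("truncated:", name) // channels become unusable now
			return nil
		case <-changes.Deleted:
			log.Println("deleted:", name) // channels become unusable now
			return nil
		case <-t.Dying():
			return tomb.ErrDying
		}
	}
}
```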
|
||||
44
vendor/gopkg.in/fsnotify.v1/AUTHORS
generated
vendored
Normal file
@@ -0,0 +1,44 @@
|
||||
# Names should be added to this file as
|
||||
# Name or Organization <email address>
|
||||
# The email address is not required for organizations.
|
||||
|
||||
# You can update this list using the following command:
|
||||
#
|
||||
# $ git shortlog -se | awk '{print $2 " " $3 " " $4}'
|
||||
|
||||
# Please keep the list sorted.
|
||||
|
||||
Adrien Bustany <adrien@bustany.org>
|
||||
Amit Krishnan <amit.krishnan@oracle.com>
|
||||
Bjørn Erik Pedersen <bjorn.erik.pedersen@gmail.com>
|
||||
Bruno Bigras <bigras.bruno@gmail.com>
|
||||
Caleb Spare <cespare@gmail.com>
|
||||
Case Nelson <case@teammating.com>
|
||||
Chris Howey <chris@howey.me> <howeyc@gmail.com>
|
||||
Christoffer Buchholz <christoffer.buchholz@gmail.com>
|
||||
Daniel Wagner-Hall <dawagner@gmail.com>
|
||||
Dave Cheney <dave@cheney.net>
|
||||
Evan Phoenix <evan@fallingsnow.net>
|
||||
Francisco Souza <f@souza.cc>
|
||||
Hari haran <hariharan.uno@gmail.com>
|
||||
John C Barstow
|
||||
Kelvin Fo <vmirage@gmail.com>
|
||||
Ken-ichirou MATSUZAWA <chamas@h4.dion.ne.jp>
|
||||
Matt Layher <mdlayher@gmail.com>
|
||||
Nathan Youngman <git@nathany.com>
|
||||
Paul Hammond <paul@paulhammond.org>
|
||||
Pawel Knap <pawelknap88@gmail.com>
|
||||
Pieter Droogendijk <pieter@binky.org.uk>
|
||||
Pursuit92 <JoshChase@techpursuit.net>
|
||||
Riku Voipio <riku.voipio@linaro.org>
|
||||
Rob Figueiredo <robfig@gmail.com>
|
||||
Soge Zhang <zhssoge@gmail.com>
|
||||
Tiffany Jernigan <tiffany.jernigan@intel.com>
|
||||
Tilak Sharma <tilaks@google.com>
|
||||
Travis Cline <travis.cline@gmail.com>
|
||||
Tudor Golubenco <tudor.g@gmail.com>
|
||||
Yukang <moorekang@gmail.com>
|
||||
bronze1man <bronze1man@gmail.com>
|
||||
debrando <denis.brandolini@gmail.com>
|
||||
henrikedwards <henrik.edwards@gmail.com>
|
||||
铁哥 <guotie.9@gmail.com>
|
||||
295
vendor/gopkg.in/fsnotify.v1/CHANGELOG.md
generated
vendored
Normal file
@@ -0,0 +1,295 @@
|
||||
# Changelog
|
||||
|
||||
## v1.3.1 / 2016-06-28
|
||||
|
||||
* windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc)
|
||||
|
||||
## v1.3.0 / 2016-04-19
|
||||
|
||||
* Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135)
|
||||
|
||||
## v1.2.10 / 2016-03-02
|
||||
|
||||
* Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj)
|
||||
|
||||
## v1.2.9 / 2016-01-13
|
||||
|
||||
* kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep)
|
||||
|
||||
## v1.2.8 / 2015-12-17
|
||||
|
||||
* kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test)
|
||||
* inotify: fix race in test
|
||||
* enable race detection for continuous integration (Linux, Mac, Windows)
|
||||
|
||||
## v1.2.5 / 2015-10-17
|
||||
|
||||
* inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki)
|
||||
* inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken)
|
||||
* kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie)
|
||||
* kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion)
|
||||
|
||||
## v1.2.1 / 2015-10-14
|
||||
|
||||
* kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx)
|
||||
|
||||
## v1.2.0 / 2015-02-08
|
||||
|
||||
* inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD)
|
||||
* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD)
|
||||
* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59)
|
||||
|
||||
## v1.1.1 / 2015-02-05
|
||||
|
||||
* inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD)
|
||||
|
||||
## v1.1.0 / 2014-12-12
|
||||
|
||||
* kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43)
|
||||
* add low-level functions
|
||||
* only need to store flags on directories
|
||||
* less mutexes [#13](https://github.com/fsnotify/fsnotify/issues/13)
|
||||
* done can be an unbuffered channel
|
||||
* remove calls to os.NewSyscallError
|
||||
* More efficient string concatenation for Event.String() [#52](https://github.com/fsnotify/fsnotify/pull/52) (thanks @mdlayher)
|
||||
* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48)
|
||||
* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
|
||||
|
||||
## v1.0.4 / 2014-09-07
|
||||
|
||||
* kqueue: add dragonfly to the build tags.
|
||||
* Rename source code files, rearrange code so exported APIs are at the top.
|
||||
* Add done channel to example code. [#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang)
|
||||
|
||||
## v1.0.3 / 2014-08-19
|
||||
|
||||
* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36)
|
||||
|
||||
## v1.0.2 / 2014-08-17
|
||||
|
||||
* [Fix] Missing create events on OS X. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
|
||||
* [Fix] Make ./path and path equivalent. (thanks @zhsso)
|
||||
|
||||
## v1.0.0 / 2014-08-15
|
||||
|
||||
* [API] Remove AddWatch on Windows, use Add.
|
||||
* Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30)
|
||||
* Minor updates based on feedback from golint.
|
||||
|
||||
## dev / 2014-07-09
|
||||
|
||||
* Moved to [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify).
|
||||
* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno)
|
||||
|
||||
## dev / 2014-07-04
|
||||
|
||||
* kqueue: fix incorrect mutex used in Close()
|
||||
* Update example to demonstrate usage of Op.
|
||||
|
||||
## dev / 2014-06-28
|
||||
|
||||
* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/fsnotify/fsnotify/issues/4)
|
||||
* Fix for String() method on Event (thanks Alex Brainman)
|
||||
* Don't build on Plan 9 or Solaris (thanks @4ad)
|
||||
|
||||
## dev / 2014-06-21
|
||||
|
||||
* Events channel of type Event rather than *Event.
|
||||
* [internal] use syscall constants directly for inotify and kqueue.
|
||||
* [internal] kqueue: rename events to kevents and fileEvent to event.
|
||||
|
||||
## dev / 2014-06-19
|
||||
|
||||
* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally).
|
||||
* [internal] remove cookie from Event struct (unused).
|
||||
* [internal] Event struct has the same definition across every OS.
|
||||
* [internal] remove internal watch and removeWatch methods.
|
||||
|
||||
## dev / 2014-06-12
|
||||
|
||||
* [API] Renamed Watch() to Add() and RemoveWatch() to Remove().
|
||||
* [API] Pluralized channel names: Events and Errors.
|
||||
* [API] Renamed FileEvent struct to Event.
|
||||
* [API] Op constants replace methods like IsCreate().
|
||||
|
||||
## dev / 2014-06-12
|
||||
|
||||
* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
|
||||
|
||||
## dev / 2014-05-23
|
||||
|
||||
* [API] Remove current implementation of WatchFlags.
|
||||
* current implementation doesn't take advantage of OS for efficiency
|
||||
* provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes
|
||||
* no tests for the current implementation
|
||||
* not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195)
|
||||
|
||||
## v0.9.3 / 2014-12-31
|
||||
|
||||
* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
|
||||
|
||||
## v0.9.2 / 2014-08-17
|
||||
|
||||
* [Backport] Fix missing create events on OS X. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
|
||||
|
||||
## v0.9.1 / 2014-06-12
|
||||
|
||||
* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
|
||||
|
||||
## v0.9.0 / 2014-01-17
|
||||
|
||||
* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany)
|
||||
* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare)
|
||||
* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library.
|
||||
|
||||
## v0.8.12 / 2013-11-13
|
||||
|
||||
* [API] Remove FD_SET and friends from Linux adapter
|
||||
|
||||
## v0.8.11 / 2013-11-02
|
||||
|
||||
* [Doc] Add Changelog [#72][] (thanks @nathany)
|
||||
* [Doc] Spotlight and double modify events on OS X [#62][] (reported by @paulhammond)
|
||||
|
||||
## v0.8.10 / 2013-10-19
|
||||
|
||||
* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott)
|
||||
* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer)
|
||||
* [Doc] specify OS-specific limits in README (thanks @debrando)
|
||||
|
||||
## v0.8.9 / 2013-09-08
|
||||
|
||||
* [Doc] Contributing (thanks @nathany)
|
||||
* [Doc] update package path in example code [#63][] (thanks @paulhammond)
|
||||
* [Doc] GoCI badge in README (Linux only) [#60][]
|
||||
* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany)
|
||||
|
||||
## v0.8.8 / 2013-06-17
|
||||
|
||||
* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie)
|
||||
|
||||
## v0.8.7 / 2013-06-03
|
||||
|
||||
* [API] Make syscall flags internal
|
||||
* [Fix] inotify: ignore event changes
|
||||
* [Fix] race in symlink test [#45][] (reported by @srid)
|
||||
* [Fix] tests on Windows
|
||||
* lower case error messages
|
||||
|
||||
## v0.8.6 / 2013-05-23
|
||||
|
||||
* kqueue: Use EVT_ONLY flag on Darwin
|
||||
* [Doc] Update README with full example
|
||||
|
||||
## v0.8.5 / 2013-05-09
|
||||
|
||||
* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg)
|
||||
|
||||
## v0.8.4 / 2013-04-07
|
||||
|
||||
* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz)
|
||||
|
||||
## v0.8.3 / 2013-03-13
|
||||
|
||||
* [Fix] inotify/kqueue memory leak [#36][] (reported by @nbkolchin)
|
||||
* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin)
|
||||
|
||||
## v0.8.2 / 2013-02-07
|
||||
|
||||
* [Doc] add Authors
|
||||
* [Fix] fix data races for map access [#29][] (thanks @fsouza)
|
||||
|
||||
## v0.8.1 / 2013-01-09
|
||||
|
||||
* [Fix] Windows path separators
|
||||
* [Doc] BSD License
|
||||
|
||||
## v0.8.0 / 2012-11-09
|
||||
|
||||
* kqueue: directory watching improvements (thanks @vmirage)
|
||||
* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto)
|
||||
* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr)
|
||||
|
||||
## v0.7.4 / 2012-10-09
|
||||
|
||||
* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji)
|
||||
* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig)
|
||||
* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig)
|
||||
* [Fix] kqueue: modify after recreation of file
|
||||
|
||||
## v0.7.3 / 2012-09-27
|
||||
|
||||
* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage)
|
||||
* [Fix] kqueue: no longer get duplicate CREATE events
|
||||
|
||||
## v0.7.2 / 2012-09-01
|
||||
|
||||
* kqueue: events for created directories
|
||||
|
||||
## v0.7.1 / 2012-07-14
|
||||
|
||||
* [Fix] for renaming files
|
||||
|
||||
## v0.7.0 / 2012-07-02
|
||||
|
||||
* [Feature] FSNotify flags
|
||||
* [Fix] inotify: Added file name back to event path
|
||||
|
||||
## v0.6.0 / 2012-06-06
|
||||
|
||||
* kqueue: watch files after directory created (thanks @tmc)
|
||||
|
||||
## v0.5.1 / 2012-05-22
|
||||
|
||||
* [Fix] inotify: remove all watches before Close()
|
||||
|
||||
## v0.5.0 / 2012-05-03
|
||||
|
||||
* [API] kqueue: return errors during watch instead of sending over channel
|
||||
* kqueue: match symlink behavior on Linux
|
||||
* inotify: add `DELETE_SELF` (requested by @taralx)
|
||||
* [Fix] kqueue: handle EINTR (reported by @robfig)
|
||||
* [Doc] Godoc example [#1][] (thanks @davecheney)
|
||||
|
||||
## v0.4.0 / 2012-03-30
|
||||
|
||||
* Go 1 released: build with go tool
|
||||
* [Feature] Windows support using winfsnotify
|
||||
* Windows does not have attribute change notifications
|
||||
* Roll attribute notifications into IsModify
|
||||
|
||||
## v0.3.0 / 2012-02-19
|
||||
|
||||
* kqueue: add files when watch directory
|
||||
|
||||
## v0.2.0 / 2011-12-30
|
||||
|
||||
* update to latest Go weekly code
|
||||
|
||||
## v0.1.0 / 2011-10-19
|
||||
|
||||
* kqueue: add watch on file creation to match inotify
|
||||
* kqueue: create file event
|
||||
* inotify: ignore `IN_IGNORED` events
|
||||
* event String()
|
||||
* linux: common FileEvent functions
|
||||
* initial commit
|
||||
|
||||
[#79]: https://github.com/howeyc/fsnotify/pull/79
|
||||
[#77]: https://github.com/howeyc/fsnotify/pull/77
|
||||
[#72]: https://github.com/howeyc/fsnotify/issues/72
|
||||
[#71]: https://github.com/howeyc/fsnotify/issues/71
|
||||
[#70]: https://github.com/howeyc/fsnotify/issues/70
|
||||
[#63]: https://github.com/howeyc/fsnotify/issues/63
|
||||
[#62]: https://github.com/howeyc/fsnotify/issues/62
|
||||
[#60]: https://github.com/howeyc/fsnotify/issues/60
|
||||
[#59]: https://github.com/howeyc/fsnotify/issues/59
|
||||
[#49]: https://github.com/howeyc/fsnotify/issues/49
|
||||
[#45]: https://github.com/howeyc/fsnotify/issues/45
|
||||
[#40]: https://github.com/howeyc/fsnotify/issues/40
|
||||
[#36]: https://github.com/howeyc/fsnotify/issues/36
|
||||
[#33]: https://github.com/howeyc/fsnotify/issues/33
|
||||
[#29]: https://github.com/howeyc/fsnotify/issues/29
|
||||
[#25]: https://github.com/howeyc/fsnotify/issues/25
|
||||
[#24]: https://github.com/howeyc/fsnotify/issues/24
|
||||
[#21]: https://github.com/howeyc/fsnotify/issues/21
|
||||
77
vendor/gopkg.in/fsnotify.v1/CONTRIBUTING.md
generated
vendored
Normal file
@@ -0,0 +1,77 @@
|
||||
# Contributing
|
||||
|
||||
## Issues
|
||||
|
||||
* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/fsnotify/fsnotify/issues).
|
||||
* Please indicate the platform you are using fsnotify on.
|
||||
* A code example to reproduce the problem is appreciated.
|
||||
|
||||
## Pull Requests
|
||||
|
||||
### Contributor License Agreement
|
||||
|
||||
fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual).
|
||||
|
||||
Please indicate that you have signed the CLA in your pull request.
|
||||
|
||||
### How fsnotify is Developed
|
||||
|
||||
* Development is done on feature branches.
|
||||
* Tests are run on BSD, Linux, OS X and Windows.
|
||||
* Pull requests are reviewed and [applied to master][am] using [hub][].
|
||||
* Maintainers may modify or squash commits rather than asking contributors to.
|
||||
* To issue a new release, the maintainers will:
|
||||
* Update the CHANGELOG
|
||||
* Tag a version, which will become available through gopkg.in.
|
||||
|
||||
### How to Fork
|
||||
|
||||
For smooth sailing, always use the original import path. Installing with `go get` makes this easy.
|
||||
|
||||
1. Install from GitHub (`go get -u github.com/fsnotify/fsnotify`)
|
||||
2. Create your feature branch (`git checkout -b my-new-feature`)
|
||||
3. Ensure everything works and the tests pass (see below)
|
||||
4. Commit your changes (`git commit -am 'Add some feature'`)
|
||||
|
||||
Contribute upstream:
|
||||
|
||||
1. Fork fsnotify on GitHub
|
||||
2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`)
|
||||
3. Push to the branch (`git push fork my-new-feature`)
|
||||
4. Create a new Pull Request on GitHub
|
||||
|
||||
This workflow is [thoroughly explained by Katrina Owen](https://blog.splice.com/contributing-open-source-git-repositories-go/).
|
||||
|
||||
### Testing
|
||||
|
||||
fsnotify uses build tags to compile different code on Linux, BSD, OS X, and Windows.
|
||||
|
||||
Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on.
|
||||
|
||||
To aid in cross-platform testing there is a Vagrantfile for Linux and BSD.
|
||||
|
||||
* Install [Vagrant](http://www.vagrantup.com/) and [VirtualBox](https://www.virtualbox.org/)
|
||||
* Setup [Vagrant Gopher](https://github.com/nathany/vagrant-gopher) in your `src` folder.
|
||||
* Run `vagrant up` from the project folder. You can also setup just one box with `vagrant up linux` or `vagrant up bsd` (note: the BSD box doesn't support Windows hosts at this time, and NFS may prompt for your host OS password)
|
||||
* Once setup, you can run the test suite on a given OS with a single command `vagrant ssh linux -c 'cd fsnotify/fsnotify; go test'`.
|
||||
* When you're done, you will want to halt or destroy the Vagrant boxes.
|
||||
|
||||
Notice: fsnotify file system events won't trigger in shared folders. The tests get around this limitation by using the /tmp directory.
|
||||
|
||||
Right now there is no equivalent solution for Windows and OS X, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads).
|
||||
|
||||
### Maintainers
|
||||
|
||||
Help maintaining fsnotify is welcome. To be a maintainer:
|
||||
|
||||
* Submit a pull request and sign the CLA as above.
|
||||
* You must be able to run the test suite on Mac, Windows, Linux and BSD.
|
||||
|
||||
To keep master clean, the fsnotify project uses the "apply mail" workflow outlined in Nathaniel Talbott's post ["Merge pull request" Considered Harmful][am]. This requires installing [hub][].
|
||||
|
||||
All code changes should be internal pull requests.
|
||||
|
||||
Releases are tagged using [Semantic Versioning](http://semver.org/).
|
||||
|
||||
[hub]: https://github.com/github/hub
|
||||
[am]: http://blog.spreedly.com/2014/06/24/merge-pull-request-considered-harmful/#.VGa5yZPF_Zs
|
||||
28
vendor/gopkg.in/fsnotify.v1/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,28 @@
|
||||
Copyright (c) 2012 The Go Authors. All rights reserved.
|
||||
Copyright (c) 2012 fsnotify Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
50
vendor/gopkg.in/fsnotify.v1/README.md
generated
vendored
Normal file
@@ -0,0 +1,50 @@
|
||||
# File system notifications for Go
|
||||
|
||||
[GoDoc](https://godoc.org/github.com/fsnotify/fsnotify) [Go Report Card](https://goreportcard.com/report/github.com/fsnotify/fsnotify) [Coverage](http://gocover.io/github.com/fsnotify/fsnotify)
|
||||
|
||||
fsnotify utilizes [golang.org/x/sys](https://godoc.org/golang.org/x/sys) rather than `syscall` from the standard library. Ensure you have the latest version installed by running:
|
||||
|
||||
```console
|
||||
go get -u golang.org/x/sys/...
|
||||
```
|
||||
|
||||
Cross platform: Windows, Linux, BSD and OS X.
|
||||
|
||||
|Adapter |OS |Status |
|----------|----------|----------|
|inotify |Linux 2.6.27 or later, Android\*|Supported [Build Status](https://travis-ci.org/fsnotify/fsnotify)|
|kqueue |BSD, OS X, iOS\*|Supported [Build Status](https://travis-ci.org/fsnotify/fsnotify)|
|ReadDirectoryChangesW|Windows|Supported [Build Status](https://ci.appveyor.com/project/NathanYoungman/fsnotify/branch/master)|
|FSEvents |OS X |[Planned](https://github.com/fsnotify/fsnotify/issues/11)|
|FEN |Solaris 11 |[In Progress](https://github.com/fsnotify/fsnotify/issues/12)|
|fanotify |Linux 2.6.37+ | |
|USN Journals |Windows |[Maybe](https://github.com/fsnotify/fsnotify/issues/53)|
|Polling |*All* |[Maybe](https://github.com/fsnotify/fsnotify/issues/9)|
|
||||
|
||||
\* Android and iOS are untested.
|
||||
|
||||
Please see [the documentation](https://godoc.org/github.com/fsnotify/fsnotify) for usage. Consult the [Wiki](https://github.com/fsnotify/fsnotify/wiki) for the FAQ and further information.
|
||||
|
||||
## API stability
|
||||
|
||||
fsnotify is a fork of [howeyc/fsnotify](https://godoc.org/github.com/howeyc/fsnotify) with a new API as of v1.0. The API is based on [this design document](http://goo.gl/MrYxyA).
|
||||
|
||||
All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/). Further API changes are [planned](https://github.com/fsnotify/fsnotify/milestones), and will be tagged with a new major revision number.
|
||||
|
||||
Go 1.6 supports dependencies located in the `vendor/` folder. Unless you are creating a library, it is recommended that you copy fsnotify into `vendor/github.com/fsnotify/fsnotify` within your project, and likewise for `golang.org/x/sys`.
|
||||
|
||||
## Contributing
|
||||
|
||||
Please refer to [CONTRIBUTING][] before opening an issue or pull request.
|
||||
|
||||
## Example
|
||||
|
||||
See [example_test.go](https://github.com/fsnotify/fsnotify/blob/master/example_test.go).
|
||||
|
||||
[contributing]: https://github.com/fsnotify/fsnotify/blob/master/CONTRIBUTING.md
|
||||
|
||||
## Related Projects
|
||||
|
||||
* [notify](https://github.com/rjeczalik/notify)
|
||||
* [fsevents](https://github.com/fsnotify/fsevents)
|
||||
|
||||
37
vendor/gopkg.in/fsnotify.v1/fen.go
generated
vendored
Normal file
@@ -0,0 +1,37 @@
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build solaris
|
||||
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"errors"
|
||||
)
|
||||
|
||||
// Watcher watches a set of files, delivering events to a channel.
|
||||
type Watcher struct {
|
||||
Events chan Event
|
||||
Errors chan error
|
||||
}
|
||||
|
||||
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
|
||||
func NewWatcher() (*Watcher, error) {
|
||||
return nil, errors.New("FEN based watcher not yet supported for fsnotify\n")
|
||||
}
|
||||
|
||||
// Close removes all watches and closes the events channel.
|
||||
func (w *Watcher) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Add starts watching the named file or directory (non-recursively).
|
||||
func (w *Watcher) Add(name string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove stops watching the named file or directory (non-recursively).
|
||||
func (w *Watcher) Remove(name string) error {
|
||||
return nil
|
||||
}
|
||||
62
vendor/gopkg.in/fsnotify.v1/fsnotify.go
generated
vendored
Normal file
@@ -0,0 +1,62 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !plan9
|
||||
|
||||
// Package fsnotify provides a platform-independent interface for file system notifications.
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Event represents a single file system notification.
|
||||
type Event struct {
|
||||
Name string // Relative path to the file or directory.
|
||||
Op Op // File operation that triggered the event.
|
||||
}
|
||||
|
||||
// Op describes a set of file operations.
|
||||
type Op uint32
|
||||
|
||||
// These are the generalized file operations that can trigger a notification.
|
||||
const (
|
||||
Create Op = 1 << iota
|
||||
Write
|
||||
Remove
|
||||
Rename
|
||||
Chmod
|
||||
)
|
||||
|
||||
// String returns a string representation of the event in the form
|
||||
// "file: REMOVE|WRITE|..."
|
||||
func (e Event) String() string {
|
||||
// Use a buffer for efficient string concatenation
|
||||
var buffer bytes.Buffer
|
||||
|
||||
if e.Op&Create == Create {
|
||||
buffer.WriteString("|CREATE")
|
||||
}
|
||||
if e.Op&Remove == Remove {
|
||||
buffer.WriteString("|REMOVE")
|
||||
}
|
||||
if e.Op&Write == Write {
|
||||
buffer.WriteString("|WRITE")
|
||||
}
|
||||
if e.Op&Rename == Rename {
|
||||
buffer.WriteString("|RENAME")
|
||||
}
|
||||
if e.Op&Chmod == Chmod {
|
||||
buffer.WriteString("|CHMOD")
|
||||
}
|
||||
|
||||
// If buffer remains empty, return no event names
|
||||
if buffer.Len() == 0 {
|
||||
return fmt.Sprintf("%q: ", e.Name)
|
||||
}
|
||||
|
||||
// Return a list of event names, with leading pipe character stripped
|
||||
return fmt.Sprintf("%q: %s", e.Name, buffer.String()[1:])
|
||||
}
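Putting the Event and Op types above together, a trimmed-down consumer of this vendored copy might look like the sketch below (the gopkg.in import path matches the vendor directory; the watched path is illustrative):

```go
package main

import (
	"log"

	"gopkg.in/fsnotify.v1" // path of this vendored copy
)

func main() {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer watcher.Close()

	// Add watches a file or directory non-recursively; "/tmp" is illustrative.
	if err := watcher.Add("/tmp"); err != nil {
		log.Fatal(err)
	}

	for {
		select {
		case ev, ok := <-watcher.Events:
			if !ok {
				return
			}
			// Op is a bit set; mask off the operations of interest.
			if ev.Op&fsnotify.Write == fsnotify.Write {
				log.Println("modified:", ev.Name)
			}
		case err, ok := <-watcher.Errors:
			if !ok {
				return
			}
			log.Println("error:", err)
		}
	}
}
```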
|
||||
325
vendor/gopkg.in/fsnotify.v1/inotify.go
generated
vendored
Normal file
@@ -0,0 +1,325 @@
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build linux
|
||||
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// Watcher watches a set of files, delivering events to a channel.
|
||||
type Watcher struct {
|
||||
Events chan Event
|
||||
Errors chan error
|
||||
mu sync.Mutex // Map access
|
||||
cv *sync.Cond // sync removing on rm_watch with IN_IGNORE
|
||||
fd int
|
||||
poller *fdPoller
|
||||
watches map[string]*watch // Map of inotify watches (key: path)
|
||||
paths map[int]string // Map of watched paths (key: watch descriptor)
|
||||
done chan struct{} // Channel for sending a "quit message" to the reader goroutine
|
||||
doneResp chan struct{} // Channel to respond to Close
|
||||
}
|
||||
|
||||
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
|
||||
func NewWatcher() (*Watcher, error) {
|
||||
// Create inotify fd
|
||||
fd, errno := unix.InotifyInit()
|
||||
if fd == -1 {
|
||||
return nil, errno
|
||||
}
|
||||
// Create epoll
|
||||
poller, err := newFdPoller(fd)
|
||||
if err != nil {
|
||||
unix.Close(fd)
|
||||
return nil, err
|
||||
}
|
||||
w := &Watcher{
|
||||
fd: fd,
|
||||
poller: poller,
|
||||
watches: make(map[string]*watch),
|
||||
paths: make(map[int]string),
|
||||
Events: make(chan Event),
|
||||
Errors: make(chan error),
|
||||
done: make(chan struct{}),
|
||||
doneResp: make(chan struct{}),
|
||||
}
|
||||
w.cv = sync.NewCond(&w.mu)
|
||||
|
||||
go w.readEvents()
|
||||
return w, nil
|
||||
}
|
||||
|
||||
func (w *Watcher) isClosed() bool {
|
||||
select {
|
||||
case <-w.done:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// Close removes all watches and closes the events channel.
|
||||
func (w *Watcher) Close() error {
|
||||
if w.isClosed() {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Send 'close' signal to goroutine, and set the Watcher to closed.
|
||||
close(w.done)
|
||||
|
||||
// Wake up goroutine
|
||||
w.poller.wake()
|
||||
|
||||
// Wait for goroutine to close
|
||||
<-w.doneResp
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Add starts watching the named file or directory (non-recursively).
|
||||
func (w *Watcher) Add(name string) error {
|
||||
name = filepath.Clean(name)
|
||||
if w.isClosed() {
|
||||
return errors.New("inotify instance already closed")
|
||||
}
|
||||
|
||||
const agnosticEvents = unix.IN_MOVED_TO | unix.IN_MOVED_FROM |
|
||||
unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY |
|
||||
unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF
|
||||
|
||||
var flags uint32 = agnosticEvents
|
||||
|
||||
w.mu.Lock()
|
||||
watchEntry, found := w.watches[name]
|
||||
w.mu.Unlock()
|
||||
if found {
|
||||
watchEntry.flags |= flags
|
||||
flags |= unix.IN_MASK_ADD
|
||||
}
|
||||
wd, errno := unix.InotifyAddWatch(w.fd, name, flags)
|
||||
if wd == -1 {
|
||||
return errno
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
w.watches[name] = &watch{wd: uint32(wd), flags: flags}
|
||||
w.paths[wd] = name
|
||||
w.mu.Unlock()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove stops watching the named file or directory (non-recursively).
|
||||
func (w *Watcher) Remove(name string) error {
|
||||
name = filepath.Clean(name)
|
||||
|
||||
// Fetch the watch.
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
watch, ok := w.watches[name]
|
||||
|
||||
// Remove it from inotify.
|
||||
if !ok {
|
||||
return fmt.Errorf("can't remove non-existent inotify watch for: %s", name)
|
||||
}
|
||||
// inotify_rm_watch will return EINVAL if the file has been deleted;
|
||||
// the watch will already have been removed.
// watches and paths are deleted in ignoreLinux() implicitly and asynchronously
|
||||
// by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE
|
||||
// so that EINVAL means that the wd is being rm_watch()ed or its file removed
|
||||
// by another thread and we have not yet received the IN_IGNORE event.
|
||||
success, errno := unix.InotifyRmWatch(w.fd, watch.wd)
|
||||
if success == -1 {
|
||||
// TODO: Perhaps it's not helpful to return an error here in every case.
|
||||
// the only two possible errors are:
|
||||
// EBADF, which happens when w.fd is not a valid file descriptor of any kind.
|
||||
// EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor.
|
||||
// Watch descriptors are invalidated when they are removed explicitly or implicitly;
|
||||
// explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted.
|
||||
return errno
|
||||
}
|
||||
|
||||
// wait until ignoreLinux() deleting maps
|
||||
exists := true
|
||||
for exists {
|
||||
w.cv.Wait()
|
||||
_, exists = w.watches[name]
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type watch struct {
|
||||
wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
|
||||
flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
|
||||
}
|
||||
|
||||
// readEvents reads from the inotify file descriptor, converts the
|
||||
// received events into Event objects and sends them via the Events channel
|
||||
func (w *Watcher) readEvents() {
|
||||
var (
|
||||
buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
|
||||
n int // Number of bytes read with read()
|
||||
errno error // Syscall errno
|
||||
ok bool // For poller.wait
|
||||
)
|
||||
|
||||
defer close(w.doneResp)
|
||||
defer close(w.Errors)
|
||||
defer close(w.Events)
|
||||
defer unix.Close(w.fd)
|
||||
defer w.poller.close()
|
||||
|
||||
for {
|
||||
// See if we have been closed.
|
||||
if w.isClosed() {
|
||||
return
|
||||
}
|
||||
|
||||
ok, errno = w.poller.wait()
|
||||
if errno != nil {
|
||||
select {
|
||||
case w.Errors <- errno:
|
||||
case <-w.done:
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
n, errno = unix.Read(w.fd, buf[:])
|
||||
// If a signal interrupted execution, see if we've been asked to close, and try again.
|
||||
// http://man7.org/linux/man-pages/man7/signal.7.html :
|
||||
// "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable"
|
||||
if errno == unix.EINTR {
|
||||
continue
|
||||
}
|
||||
|
||||
// unix.Read might have been woken up by Close. If so, we're done.
|
||||
if w.isClosed() {
|
||||
return
|
||||
}
|
||||
|
||||
if n < unix.SizeofInotifyEvent {
|
||||
var err error
|
||||
if n == 0 {
|
||||
// EOF was received, which should really never happen.
|
||||
err = io.EOF
|
||||
} else if n < 0 {
|
||||
// If an error occurred while reading.
|
||||
err = errno
|
||||
} else {
|
||||
// Read was too short.
|
||||
err = errors.New("notify: short read in readEvents()")
|
||||
}
|
||||
select {
|
||||
case w.Errors <- err:
|
||||
case <-w.done:
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
var offset uint32
|
||||
// We don't know how many events we just read into the buffer
|
||||
// While the offset points to at least one whole event...
|
||||
for offset <= uint32(n-unix.SizeofInotifyEvent) {
|
||||
// Point "raw" to the event in the buffer
|
||||
raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))
|
||||
|
||||
mask := uint32(raw.Mask)
|
||||
nameLen := uint32(raw.Len)
|
||||
// If the event happened to the watched directory or the watched file, the kernel
|
||||
// doesn't append the filename to the event, but we would like to always fill
// the "Name" field with a valid filename. We retrieve the path of the watch from
|
||||
// the "paths" map.
|
||||
w.mu.Lock()
|
||||
name := w.paths[int(raw.Wd)]
|
||||
w.mu.Unlock()
|
||||
if nameLen > 0 {
|
||||
// Point "bytes" at the first byte of the filename
|
||||
bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))
|
||||
// The filename is padded with NULL bytes. TrimRight() gets rid of those.
|
||||
name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
|
||||
}
|
||||
|
||||
event := newEvent(name, mask)
|
||||
|
||||
// Send the events that are not ignored on the events channel
|
||||
if !event.ignoreLinux(w, raw.Wd, mask) {
|
||||
select {
|
||||
case w.Events <- event:
|
||||
case <-w.done:
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Move to the next event in the buffer
|
||||
offset += unix.SizeofInotifyEvent + nameLen
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Certain types of events can be "ignored" and not sent over the Events
|
||||
// channel, such as events marked ignore by the kernel, or MODIFY events
|
||||
// against files that do not exist.
|
||||
func (e *Event) ignoreLinux(w *Watcher, wd int32, mask uint32) bool {
|
||||
// Ignore anything the inotify API says to ignore
|
||||
if mask&unix.IN_IGNORED == unix.IN_IGNORED {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
name := w.paths[int(wd)]
|
||||
delete(w.paths, int(wd))
|
||||
delete(w.watches, name)
|
||||
w.cv.Broadcast()
|
||||
return true
|
||||
}
|
||||
|
||||
// If the event is not a DELETE or RENAME, the file must exist.
|
||||
// Otherwise the event is ignored.
|
||||
// *Note*: this was put in place because it was seen that a MODIFY
|
||||
// event was sent after the DELETE. This ignores that MODIFY and
|
||||
// assumes a DELETE will come or has come if the file doesn't exist.
|
||||
if !(e.Op&Remove == Remove || e.Op&Rename == Rename) {
|
||||
_, statErr := os.Lstat(e.Name)
|
||||
return os.IsNotExist(statErr)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// newEvent returns a platform-independent Event based on an inotify mask.
|
||||
func newEvent(name string, mask uint32) Event {
|
||||
e := Event{Name: name}
|
||||
if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO {
|
||||
e.Op |= Create
|
||||
}
|
||||
if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE {
|
||||
e.Op |= Remove
|
||||
}
|
||||
if mask&unix.IN_MODIFY == unix.IN_MODIFY {
|
||||
e.Op |= Write
|
||||
}
|
||||
if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM {
|
||||
e.Op |= Rename
|
||||
}
|
||||
if mask&unix.IN_ATTRIB == unix.IN_ATTRIB {
|
||||
e.Op |= Chmod
|
||||
}
|
||||
return e
|
||||
}
|
||||
187
vendor/gopkg.in/fsnotify.v1/inotify_poller.go
generated
vendored
Normal file
@@ -0,0 +1,187 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build linux
|
||||
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
type fdPoller struct {
|
||||
fd int // File descriptor (as returned by the inotify_init() syscall)
|
||||
epfd int // Epoll file descriptor
|
||||
pipe [2]int // Pipe for waking up
|
||||
}
|
||||
|
||||
func emptyPoller(fd int) *fdPoller {
|
||||
poller := new(fdPoller)
|
||||
poller.fd = fd
|
||||
poller.epfd = -1
|
||||
poller.pipe[0] = -1
|
||||
poller.pipe[1] = -1
|
||||
return poller
|
||||
}
|
||||
|
||||
// Create a new inotify poller.
|
||||
// This creates an inotify handler, and an epoll handler.
|
||||
func newFdPoller(fd int) (*fdPoller, error) {
|
||||
var errno error
|
||||
poller := emptyPoller(fd)
|
||||
defer func() {
|
||||
if errno != nil {
|
||||
poller.close()
|
||||
}
|
||||
}()
|
||||
poller.fd = fd
|
||||
|
||||
// Create epoll fd
|
||||
poller.epfd, errno = unix.EpollCreate1(0)
|
||||
if poller.epfd == -1 {
|
||||
return nil, errno
|
||||
}
|
||||
// Create pipe; pipe[0] is the read end, pipe[1] the write end.
|
||||
errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK)
|
||||
if errno != nil {
|
||||
return nil, errno
|
||||
}
|
||||
|
||||
// Register inotify fd with epoll
|
||||
event := unix.EpollEvent{
|
||||
Fd: int32(poller.fd),
|
||||
Events: unix.EPOLLIN,
|
||||
}
|
||||
errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.fd, &event)
|
||||
if errno != nil {
|
||||
return nil, errno
|
||||
}
|
||||
|
||||
// Register pipe fd with epoll
|
||||
event = unix.EpollEvent{
|
||||
Fd: int32(poller.pipe[0]),
|
||||
Events: unix.EPOLLIN,
|
||||
}
|
||||
errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.pipe[0], &event)
|
||||
if errno != nil {
|
||||
return nil, errno
|
||||
}
|
||||
|
||||
return poller, nil
|
||||
}
|
||||
|
||||
// Wait using epoll.
|
||||
// Returns true if something is ready to be read,
|
||||
// false if there is not.
|
||||
func (poller *fdPoller) wait() (bool, error) {
|
||||
// 3 possible events per fd, and 2 fds, makes a maximum of 6 events.
|
||||
// I don't know whether epoll_wait returns the number of events returned,
|
||||
// or the total number of events ready.
|
||||
// I decided to catch both by making the buffer one larger than the maximum.
|
||||
events := make([]unix.EpollEvent, 7)
|
||||
for {
|
||||
n, errno := unix.EpollWait(poller.epfd, events, -1)
|
||||
if n == -1 {
|
||||
if errno == unix.EINTR {
|
||||
continue
|
||||
}
|
||||
return false, errno
|
||||
}
|
||||
if n == 0 {
|
||||
// If there are no events, try again.
|
||||
continue
|
||||
}
|
||||
if n > 6 {
|
||||
// This should never happen. More events were returned than should be possible.
|
||||
return false, errors.New("epoll_wait returned more events than I know what to do with")
|
||||
}
|
||||
ready := events[:n]
|
||||
epollhup := false
|
||||
epollerr := false
|
||||
epollin := false
|
||||
for _, event := range ready {
|
||||
if event.Fd == int32(poller.fd) {
|
||||
if event.Events&unix.EPOLLHUP != 0 {
|
||||
// This should not happen, but if it does, treat it as a wakeup.
|
||||
epollhup = true
|
||||
}
|
||||
if event.Events&unix.EPOLLERR != 0 {
|
||||
// If an error is waiting on the file descriptor, we should pretend
|
||||
// something is ready to read, and let unix.Read pick up the error.
|
||||
epollerr = true
|
||||
}
|
||||
if event.Events&unix.EPOLLIN != 0 {
|
||||
// There is data to read.
|
||||
epollin = true
|
||||
}
|
||||
}
|
||||
if event.Fd == int32(poller.pipe[0]) {
|
||||
if event.Events&unix.EPOLLHUP != 0 {
|
||||
// Write pipe descriptor was closed, by us. This means we're closing down the
|
||||
// watcher, and we should wake up.
|
||||
}
|
||||
if event.Events&unix.EPOLLERR != 0 {
|
||||
// If an error is waiting on the pipe file descriptor.
|
||||
// This is an absolute mystery, and should never ever happen.
|
||||
return false, errors.New("Error on the pipe descriptor.")
|
||||
}
|
||||
if event.Events&unix.EPOLLIN != 0 {
|
||||
// This is a regular wakeup, so we have to clear the buffer.
|
||||
err := poller.clearWake()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if epollhup || epollerr || epollin {
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
// wake writes to the pipe to wake up the poller.
|
||||
func (poller *fdPoller) wake() error {
|
||||
buf := make([]byte, 1)
|
||||
n, errno := unix.Write(poller.pipe[1], buf)
|
||||
if n == -1 {
|
||||
if errno == unix.EAGAIN {
|
||||
// Buffer is full, poller will wake.
|
||||
return nil
|
||||
}
|
||||
return errno
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (poller *fdPoller) clearWake() error {
|
||||
// You have to be woken up a LOT in order to get to 100!
|
||||
buf := make([]byte, 100)
|
||||
n, errno := unix.Read(poller.pipe[0], buf)
|
||||
if n == -1 {
|
||||
if errno == unix.EAGAIN {
|
||||
// Buffer is empty, someone else cleared our wake.
|
||||
return nil
|
||||
}
|
||||
return errno
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close all poller file descriptors, but not the one passed to it.
|
||||
func (poller *fdPoller) close() {
|
||||
if poller.pipe[1] != -1 {
|
||||
unix.Close(poller.pipe[1])
|
||||
}
|
||||
if poller.pipe[0] != -1 {
|
||||
unix.Close(poller.pipe[0])
|
||||
}
|
||||
if poller.epfd != -1 {
|
||||
unix.Close(poller.epfd)
|
||||
}
|
||||
}
|
||||
503
vendor/gopkg.in/fsnotify.v1/kqueue.go
generated
vendored
Normal file
@@ -0,0 +1,503 @@
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build freebsd openbsd netbsd dragonfly darwin
|
||||
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// Watcher watches a set of files, delivering events to a channel.
|
||||
type Watcher struct {
|
||||
Events chan Event
|
||||
Errors chan error
|
||||
done chan bool // Channel for sending a "quit message" to the reader goroutine
|
||||
|
||||
kq int // File descriptor (as returned by the kqueue() syscall).
|
||||
|
||||
mu sync.Mutex // Protects access to watcher data
|
||||
watches map[string]int // Map of watched file descriptors (key: path).
|
||||
externalWatches map[string]bool // Map of watches added by user of the library.
|
||||
dirFlags map[string]uint32 // Map of watched directories to fflags used in kqueue.
|
||||
paths map[int]pathInfo // Map file descriptors to path names for processing kqueue events.
|
||||
fileExists map[string]bool // Tracks whether we already know this file exists (to suppress duplicate create events).
|
||||
isClosed bool // Set to true when Close() is first called
|
||||
}
|
||||
|
||||
type pathInfo struct {
|
||||
name string
|
||||
isDir bool
|
||||
}
|
||||
|
||||
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
|
||||
func NewWatcher() (*Watcher, error) {
|
||||
kq, err := kqueue()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
w := &Watcher{
|
||||
kq: kq,
|
||||
watches: make(map[string]int),
|
||||
dirFlags: make(map[string]uint32),
|
||||
paths: make(map[int]pathInfo),
|
||||
fileExists: make(map[string]bool),
|
||||
externalWatches: make(map[string]bool),
|
||||
Events: make(chan Event),
|
||||
Errors: make(chan error),
|
||||
done: make(chan bool),
|
||||
}
|
||||
|
||||
go w.readEvents()
|
||||
return w, nil
|
||||
}
|
||||
|
||||
// Close removes all watches and closes the events channel.
|
||||
func (w *Watcher) Close() error {
|
||||
w.mu.Lock()
|
||||
if w.isClosed {
|
||||
w.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
w.isClosed = true
|
||||
w.mu.Unlock()
|
||||
|
||||
// copy paths to remove while locked
|
||||
w.mu.Lock()
|
||||
var pathsToRemove = make([]string, 0, len(w.watches))
|
||||
for name := range w.watches {
|
||||
pathsToRemove = append(pathsToRemove, name)
|
||||
}
|
||||
w.mu.Unlock()
|
||||
// unlock before calling Remove, which also locks
|
||||
|
||||
var err error
|
||||
for _, name := range pathsToRemove {
|
||||
if e := w.Remove(name); e != nil && err == nil {
|
||||
err = e
|
||||
}
|
||||
}
|
||||
|
||||
// Send "quit" message to the reader goroutine:
|
||||
w.done <- true
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Add starts watching the named file or directory (non-recursively).
|
||||
func (w *Watcher) Add(name string) error {
|
||||
w.mu.Lock()
|
||||
w.externalWatches[name] = true
|
||||
w.mu.Unlock()
|
||||
_, err := w.addWatch(name, noteAllEvents)
|
||||
return err
|
||||
}
|
||||
|
||||
// Remove stops watching the named file or directory (non-recursively).
|
||||
func (w *Watcher) Remove(name string) error {
|
||||
name = filepath.Clean(name)
|
||||
w.mu.Lock()
|
||||
watchfd, ok := w.watches[name]
|
||||
w.mu.Unlock()
|
||||
if !ok {
|
||||
return fmt.Errorf("can't remove non-existent kevent watch for: %s", name)
|
||||
}
|
||||
|
||||
const registerRemove = unix.EV_DELETE
|
||||
if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
unix.Close(watchfd)
|
||||
|
||||
w.mu.Lock()
|
||||
isDir := w.paths[watchfd].isDir
|
||||
delete(w.watches, name)
|
||||
delete(w.paths, watchfd)
|
||||
delete(w.dirFlags, name)
|
||||
w.mu.Unlock()
|
||||
|
||||
// Find all watched paths that are in this directory that are not external.
|
||||
if isDir {
|
||||
var pathsToRemove []string
|
||||
w.mu.Lock()
|
||||
for _, path := range w.paths {
|
||||
wdir, _ := filepath.Split(path.name)
|
||||
if filepath.Clean(wdir) == name {
|
||||
if !w.externalWatches[path.name] {
|
||||
pathsToRemove = append(pathsToRemove, path.name)
|
||||
}
|
||||
}
|
||||
}
|
||||
w.mu.Unlock()
|
||||
for _, name := range pathsToRemove {
|
||||
// Since these are internal, not much sense in propagating error
|
||||
// to the user, as that will just confuse them with an error about
|
||||
// a path they did not explicitly watch themselves.
|
||||
w.Remove(name)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE)
|
||||
const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME
|
||||
|
||||
// keventWaitTime to block on each read from kevent
|
||||
var keventWaitTime = durationToTimespec(100 * time.Millisecond)
|
||||
|
||||
// addWatch adds name to the watched file set.
|
||||
// The flags are interpreted as described in kevent(2).
|
||||
// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks.
|
||||
func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
|
||||
var isDir bool
|
||||
// Make ./name and name equivalent
|
||||
name = filepath.Clean(name)
|
||||
|
||||
w.mu.Lock()
|
||||
if w.isClosed {
|
||||
w.mu.Unlock()
|
||||
return "", errors.New("kevent instance already closed")
|
||||
}
|
||||
watchfd, alreadyWatching := w.watches[name]
|
||||
// We already have a watch, but we can still override flags.
|
||||
if alreadyWatching {
|
||||
isDir = w.paths[watchfd].isDir
|
||||
}
|
||||
w.mu.Unlock()
|
||||
|
||||
if !alreadyWatching {
|
||||
fi, err := os.Lstat(name)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Don't watch sockets.
|
||||
if fi.Mode()&os.ModeSocket == os.ModeSocket {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// Don't watch named pipes.
|
||||
if fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// Follow Symlinks
|
||||
// Unfortunately, Linux can add bogus symlinks to watch list without
|
||||
// issue, and Windows can't do symlinks period (AFAIK). To maintain
|
||||
// consistency, we will act like everything is fine. There will simply
|
||||
// be no file events for broken symlinks.
|
||||
// Hence the returns of nil on errors.
|
||||
if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
|
||||
name, err = filepath.EvalSymlinks(name)
|
||||
if err != nil {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
_, alreadyWatching = w.watches[name]
|
||||
w.mu.Unlock()
|
||||
|
||||
if alreadyWatching {
|
||||
return name, nil
|
||||
}
|
||||
|
||||
fi, err = os.Lstat(name)
|
||||
if err != nil {
|
||||
return "", nil
|
||||
}
|
||||
}
|
||||
|
||||
watchfd, err = unix.Open(name, openMode, 0700)
|
||||
if watchfd == -1 {
|
||||
return "", err
|
||||
}
|
||||
|
||||
isDir = fi.IsDir()
|
||||
}
|
||||
|
||||
const registerAdd = unix.EV_ADD | unix.EV_CLEAR | unix.EV_ENABLE
|
||||
if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil {
|
||||
unix.Close(watchfd)
|
||||
return "", err
|
||||
}
|
||||
|
||||
if !alreadyWatching {
|
||||
w.mu.Lock()
|
||||
w.watches[name] = watchfd
|
||||
w.paths[watchfd] = pathInfo{name: name, isDir: isDir}
|
||||
w.mu.Unlock()
|
||||
}
|
||||
|
||||
if isDir {
|
||||
// Watch the directory if it has not been watched before,
|
||||
// or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles)
|
||||
w.mu.Lock()
|
||||
|
||||
watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE &&
|
||||
(!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE)
|
||||
// Store flags so this watch can be updated later
|
||||
w.dirFlags[name] = flags
|
||||
w.mu.Unlock()
|
||||
|
||||
if watchDir {
|
||||
if err := w.watchDirectoryFiles(name); err != nil {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
}
|
||||
return name, nil
|
||||
}
|
||||
|
||||
// readEvents reads from kqueue and converts the received kevents into
|
||||
// Event values that it sends down the Events channel.
|
||||
func (w *Watcher) readEvents() {
|
||||
eventBuffer := make([]unix.Kevent_t, 10)
|
||||
|
||||
for {
|
||||
// See if there is a message on the "done" channel
|
||||
select {
|
||||
case <-w.done:
|
||||
err := unix.Close(w.kq)
|
||||
if err != nil {
|
||||
w.Errors <- err
|
||||
}
|
||||
close(w.Events)
|
||||
close(w.Errors)
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
// Get new events
|
||||
kevents, err := read(w.kq, eventBuffer, &keventWaitTime)
|
||||
// EINTR is okay, the syscall was interrupted before timeout expired.
|
||||
if err != nil && err != unix.EINTR {
|
||||
w.Errors <- err
|
||||
continue
|
||||
}
|
||||
|
||||
// Flush the events we received to the Events channel
|
||||
for len(kevents) > 0 {
|
||||
kevent := &kevents[0]
|
||||
watchfd := int(kevent.Ident)
|
||||
mask := uint32(kevent.Fflags)
|
||||
w.mu.Lock()
|
||||
path := w.paths[watchfd]
|
||||
w.mu.Unlock()
|
||||
event := newEvent(path.name, mask)
|
||||
|
||||
if path.isDir && !(event.Op&Remove == Remove) {
|
||||
// Double check to make sure the directory exists. This can happen when
|
||||
// we do a rm -fr on a recursively watched folders and we receive a
|
||||
// modification event first but the folder has been deleted and later
|
||||
// receive the delete event
|
||||
if _, err := os.Lstat(event.Name); os.IsNotExist(err) {
|
||||
// mark is as delete event
|
||||
event.Op |= Remove
|
||||
}
|
||||
}
|
||||
|
||||
if event.Op&Rename == Rename || event.Op&Remove == Remove {
|
||||
w.Remove(event.Name)
|
||||
w.mu.Lock()
|
||||
delete(w.fileExists, event.Name)
|
||||
w.mu.Unlock()
|
||||
}
|
||||
|
||||
if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) {
|
||||
w.sendDirectoryChangeEvents(event.Name)
|
||||
} else {
|
||||
// Send the event on the Events channel
|
||||
w.Events <- event
|
||||
}
|
||||
|
||||
if event.Op&Remove == Remove {
|
||||
// Look for a file that may have overwritten this.
|
||||
// For example, mv f1 f2 will delete f2, then create f2.
|
||||
if path.isDir {
|
||||
fileDir := filepath.Clean(event.Name)
|
||||
w.mu.Lock()
|
||||
_, found := w.watches[fileDir]
|
||||
w.mu.Unlock()
|
||||
if found {
|
||||
// make sure the directory exists before we watch for changes. When we
|
||||
// do a recursive watch and perform rm -fr, the parent directory might
|
||||
// have gone missing, ignore the missing directory and let the
|
||||
// upcoming delete event remove the watch from the parent directory.
|
||||
if _, err := os.Lstat(fileDir); err == nil {
|
||||
w.sendDirectoryChangeEvents(fileDir)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
filePath := filepath.Clean(event.Name)
|
||||
if fileInfo, err := os.Lstat(filePath); err == nil {
|
||||
w.sendFileCreatedEventIfNew(filePath, fileInfo)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Move to next event
|
||||
kevents = kevents[1:]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// newEvent returns an platform-independent Event based on kqueue Fflags.
|
||||
func newEvent(name string, mask uint32) Event {
|
||||
e := Event{Name: name}
|
||||
if mask&unix.NOTE_DELETE == unix.NOTE_DELETE {
|
||||
e.Op |= Remove
|
||||
}
|
||||
if mask&unix.NOTE_WRITE == unix.NOTE_WRITE {
|
||||
e.Op |= Write
|
||||
}
|
||||
if mask&unix.NOTE_RENAME == unix.NOTE_RENAME {
|
||||
e.Op |= Rename
|
||||
}
|
||||
if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB {
|
||||
e.Op |= Chmod
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
func newCreateEvent(name string) Event {
|
||||
return Event{Name: name, Op: Create}
|
||||
}
|
||||
|
||||
// watchDirectoryFiles to mimic inotify when adding a watch on a directory
|
||||
func (w *Watcher) watchDirectoryFiles(dirPath string) error {
|
||||
// Get all files
|
||||
files, err := ioutil.ReadDir(dirPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, fileInfo := range files {
|
||||
filePath := filepath.Join(dirPath, fileInfo.Name())
|
||||
filePath, err = w.internalWatch(filePath, fileInfo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
w.fileExists[filePath] = true
|
||||
w.mu.Unlock()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// sendDirectoryEvents searches the directory for newly created files
|
||||
// and sends them over the event channel. This functionality is to have
|
||||
// the BSD version of fsnotify match Linux inotify which provides a
|
||||
// create event for files created in a watched directory.
|
||||
func (w *Watcher) sendDirectoryChangeEvents(dirPath string) {
|
||||
// Get all files
|
||||
files, err := ioutil.ReadDir(dirPath)
|
||||
if err != nil {
|
||||
w.Errors <- err
|
||||
}
|
||||
|
||||
// Search for new files
|
||||
for _, fileInfo := range files {
|
||||
filePath := filepath.Join(dirPath, fileInfo.Name())
|
||||
err := w.sendFileCreatedEventIfNew(filePath, fileInfo)
|
||||
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// sendFileCreatedEvent sends a create event if the file isn't already being tracked.
|
||||
func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) {
|
||||
w.mu.Lock()
|
||||
_, doesExist := w.fileExists[filePath]
|
||||
w.mu.Unlock()
|
||||
if !doesExist {
|
||||
// Send create event
|
||||
w.Events <- newCreateEvent(filePath)
|
||||
}
|
||||
|
||||
// like watchDirectoryFiles (but without doing another ReadDir)
|
||||
filePath, err = w.internalWatch(filePath, fileInfo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
w.fileExists[filePath] = true
|
||||
w.mu.Unlock()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) {
|
||||
if fileInfo.IsDir() {
|
||||
// mimic Linux providing delete events for subdirectories
|
||||
// but preserve the flags used if currently watching subdirectory
|
||||
w.mu.Lock()
|
||||
flags := w.dirFlags[name]
|
||||
w.mu.Unlock()
|
||||
|
||||
flags |= unix.NOTE_DELETE | unix.NOTE_RENAME
|
||||
return w.addWatch(name, flags)
|
||||
}
|
||||
|
||||
// watch file to mimic Linux inotify
|
||||
return w.addWatch(name, noteAllEvents)
|
||||
}
|
||||
|
||||
// kqueue creates a new kernel event queue and returns a descriptor.
|
||||
func kqueue() (kq int, err error) {
|
||||
kq, err = unix.Kqueue()
|
||||
if kq == -1 {
|
||||
return kq, err
|
||||
}
|
||||
return kq, nil
|
||||
}
|
||||
|
||||
// register events with the queue
|
||||
func register(kq int, fds []int, flags int, fflags uint32) error {
|
||||
changes := make([]unix.Kevent_t, len(fds))
|
||||
|
||||
for i, fd := range fds {
|
||||
// SetKevent converts int to the platform-specific types:
|
||||
unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags)
|
||||
changes[i].Fflags = fflags
|
||||
}
|
||||
|
||||
// register the events
|
||||
success, err := unix.Kevent(kq, changes, nil, nil)
|
||||
if success == -1 {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// read retrieves pending events, or waits until an event occurs.
|
||||
// A timeout of nil blocks indefinitely, while 0 polls the queue.
|
||||
func read(kq int, events []unix.Kevent_t, timeout *unix.Timespec) ([]unix.Kevent_t, error) {
|
||||
n, err := unix.Kevent(kq, nil, events, timeout)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return events[0:n], nil
|
||||
}
|
||||
|
||||
// durationToTimespec prepares a timeout value
|
||||
func durationToTimespec(d time.Duration) unix.Timespec {
|
||||
return unix.NsecToTimespec(d.Nanoseconds())
|
||||
}
|
||||
11
vendor/gopkg.in/fsnotify.v1/open_mode_bsd.go
generated
vendored
Normal file
11
vendor/gopkg.in/fsnotify.v1/open_mode_bsd.go
generated
vendored
Normal file
@@ -0,0 +1,11 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build freebsd openbsd netbsd dragonfly
|
||||
|
||||
package fsnotify
|
||||
|
||||
import "golang.org/x/sys/unix"
|
||||
|
||||
const openMode = unix.O_NONBLOCK | unix.O_RDONLY
|
||||
12
vendor/gopkg.in/fsnotify.v1/open_mode_darwin.go
generated
vendored
Normal file
12
vendor/gopkg.in/fsnotify.v1/open_mode_darwin.go
generated
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build darwin
|
||||
|
||||
package fsnotify
|
||||
|
||||
import "golang.org/x/sys/unix"
|
||||
|
||||
// note: this constant is not defined on BSD
|
||||
const openMode = unix.O_EVTONLY
|
||||
561
vendor/gopkg.in/fsnotify.v1/windows.go
generated
vendored
Normal file
561
vendor/gopkg.in/fsnotify.v1/windows.go
generated
vendored
Normal file
@@ -0,0 +1,561 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build windows
|
||||
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"sync"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// Watcher watches a set of files, delivering events to a channel.
|
||||
type Watcher struct {
|
||||
Events chan Event
|
||||
Errors chan error
|
||||
isClosed bool // Set to true when Close() is first called
|
||||
mu sync.Mutex // Map access
|
||||
port syscall.Handle // Handle to completion port
|
||||
watches watchMap // Map of watches (key: i-number)
|
||||
input chan *input // Inputs to the reader are sent on this channel
|
||||
quit chan chan<- error
|
||||
}
|
||||
|
||||
// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
|
||||
func NewWatcher() (*Watcher, error) {
|
||||
port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0)
|
||||
if e != nil {
|
||||
return nil, os.NewSyscallError("CreateIoCompletionPort", e)
|
||||
}
|
||||
w := &Watcher{
|
||||
port: port,
|
||||
watches: make(watchMap),
|
||||
input: make(chan *input, 1),
|
||||
Events: make(chan Event, 50),
|
||||
Errors: make(chan error),
|
||||
quit: make(chan chan<- error, 1),
|
||||
}
|
||||
go w.readEvents()
|
||||
return w, nil
|
||||
}
|
||||
|
||||
// Close removes all watches and closes the events channel.
|
||||
func (w *Watcher) Close() error {
|
||||
if w.isClosed {
|
||||
return nil
|
||||
}
|
||||
w.isClosed = true
|
||||
|
||||
// Send "quit" message to the reader goroutine
|
||||
ch := make(chan error)
|
||||
w.quit <- ch
|
||||
if err := w.wakeupReader(); err != nil {
|
||||
return err
|
||||
}
|
||||
return <-ch
|
||||
}
|
||||
|
||||
// Add starts watching the named file or directory (non-recursively).
|
||||
func (w *Watcher) Add(name string) error {
|
||||
if w.isClosed {
|
||||
return errors.New("watcher already closed")
|
||||
}
|
||||
in := &input{
|
||||
op: opAddWatch,
|
||||
path: filepath.Clean(name),
|
||||
flags: sysFSALLEVENTS,
|
||||
reply: make(chan error),
|
||||
}
|
||||
w.input <- in
|
||||
if err := w.wakeupReader(); err != nil {
|
||||
return err
|
||||
}
|
||||
return <-in.reply
|
||||
}
|
||||
|
||||
// Remove stops watching the the named file or directory (non-recursively).
|
||||
func (w *Watcher) Remove(name string) error {
|
||||
in := &input{
|
||||
op: opRemoveWatch,
|
||||
path: filepath.Clean(name),
|
||||
reply: make(chan error),
|
||||
}
|
||||
w.input <- in
|
||||
if err := w.wakeupReader(); err != nil {
|
||||
return err
|
||||
}
|
||||
return <-in.reply
|
||||
}
|
||||
|
||||
const (
|
||||
// Options for AddWatch
|
||||
sysFSONESHOT = 0x80000000
|
||||
sysFSONLYDIR = 0x1000000
|
||||
|
||||
// Events
|
||||
sysFSACCESS = 0x1
|
||||
sysFSALLEVENTS = 0xfff
|
||||
sysFSATTRIB = 0x4
|
||||
sysFSCLOSE = 0x18
|
||||
sysFSCREATE = 0x100
|
||||
sysFSDELETE = 0x200
|
||||
sysFSDELETESELF = 0x400
|
||||
sysFSMODIFY = 0x2
|
||||
sysFSMOVE = 0xc0
|
||||
sysFSMOVEDFROM = 0x40
|
||||
sysFSMOVEDTO = 0x80
|
||||
sysFSMOVESELF = 0x800
|
||||
|
||||
// Special events
|
||||
sysFSIGNORED = 0x8000
|
||||
sysFSQOVERFLOW = 0x4000
|
||||
)
|
||||
|
||||
func newEvent(name string, mask uint32) Event {
|
||||
e := Event{Name: name}
|
||||
if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO {
|
||||
e.Op |= Create
|
||||
}
|
||||
if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF {
|
||||
e.Op |= Remove
|
||||
}
|
||||
if mask&sysFSMODIFY == sysFSMODIFY {
|
||||
e.Op |= Write
|
||||
}
|
||||
if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM {
|
||||
e.Op |= Rename
|
||||
}
|
||||
if mask&sysFSATTRIB == sysFSATTRIB {
|
||||
e.Op |= Chmod
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
const (
|
||||
opAddWatch = iota
|
||||
opRemoveWatch
|
||||
)
|
||||
|
||||
const (
|
||||
provisional uint64 = 1 << (32 + iota)
|
||||
)
|
||||
|
||||
type input struct {
|
||||
op int
|
||||
path string
|
||||
flags uint32
|
||||
reply chan error
|
||||
}
|
||||
|
||||
type inode struct {
|
||||
handle syscall.Handle
|
||||
volume uint32
|
||||
index uint64
|
||||
}
|
||||
|
||||
type watch struct {
|
||||
ov syscall.Overlapped
|
||||
ino *inode // i-number
|
||||
path string // Directory path
|
||||
mask uint64 // Directory itself is being watched with these notify flags
|
||||
names map[string]uint64 // Map of names being watched and their notify flags
|
||||
rename string // Remembers the old name while renaming a file
|
||||
buf [4096]byte
|
||||
}
|
||||
|
||||
type indexMap map[uint64]*watch
|
||||
type watchMap map[uint32]indexMap
|
||||
|
||||
func (w *Watcher) wakeupReader() error {
|
||||
e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil)
|
||||
if e != nil {
|
||||
return os.NewSyscallError("PostQueuedCompletionStatus", e)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func getDir(pathname string) (dir string, err error) {
|
||||
attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname))
|
||||
if e != nil {
|
||||
return "", os.NewSyscallError("GetFileAttributes", e)
|
||||
}
|
||||
if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 {
|
||||
dir = pathname
|
||||
} else {
|
||||
dir, _ = filepath.Split(pathname)
|
||||
dir = filepath.Clean(dir)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func getIno(path string) (ino *inode, err error) {
|
||||
h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path),
|
||||
syscall.FILE_LIST_DIRECTORY,
|
||||
syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
|
||||
nil, syscall.OPEN_EXISTING,
|
||||
syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0)
|
||||
if e != nil {
|
||||
return nil, os.NewSyscallError("CreateFile", e)
|
||||
}
|
||||
var fi syscall.ByHandleFileInformation
|
||||
if e = syscall.GetFileInformationByHandle(h, &fi); e != nil {
|
||||
syscall.CloseHandle(h)
|
||||
return nil, os.NewSyscallError("GetFileInformationByHandle", e)
|
||||
}
|
||||
ino = &inode{
|
||||
handle: h,
|
||||
volume: fi.VolumeSerialNumber,
|
||||
index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow),
|
||||
}
|
||||
return ino, nil
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (m watchMap) get(ino *inode) *watch {
|
||||
if i := m[ino.volume]; i != nil {
|
||||
return i[ino.index]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (m watchMap) set(ino *inode, watch *watch) {
|
||||
i := m[ino.volume]
|
||||
if i == nil {
|
||||
i = make(indexMap)
|
||||
m[ino.volume] = i
|
||||
}
|
||||
i[ino.index] = watch
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (w *Watcher) addWatch(pathname string, flags uint64) error {
|
||||
dir, err := getDir(pathname)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if flags&sysFSONLYDIR != 0 && pathname != dir {
|
||||
return nil
|
||||
}
|
||||
ino, err := getIno(dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w.mu.Lock()
|
||||
watchEntry := w.watches.get(ino)
|
||||
w.mu.Unlock()
|
||||
if watchEntry == nil {
|
||||
if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil {
|
||||
syscall.CloseHandle(ino.handle)
|
||||
return os.NewSyscallError("CreateIoCompletionPort", e)
|
||||
}
|
||||
watchEntry = &watch{
|
||||
ino: ino,
|
||||
path: dir,
|
||||
names: make(map[string]uint64),
|
||||
}
|
||||
w.mu.Lock()
|
||||
w.watches.set(ino, watchEntry)
|
||||
w.mu.Unlock()
|
||||
flags |= provisional
|
||||
} else {
|
||||
syscall.CloseHandle(ino.handle)
|
||||
}
|
||||
if pathname == dir {
|
||||
watchEntry.mask |= flags
|
||||
} else {
|
||||
watchEntry.names[filepath.Base(pathname)] |= flags
|
||||
}
|
||||
if err = w.startRead(watchEntry); err != nil {
|
||||
return err
|
||||
}
|
||||
if pathname == dir {
|
||||
watchEntry.mask &= ^provisional
|
||||
} else {
|
||||
watchEntry.names[filepath.Base(pathname)] &= ^provisional
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (w *Watcher) remWatch(pathname string) error {
|
||||
dir, err := getDir(pathname)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ino, err := getIno(dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w.mu.Lock()
|
||||
watch := w.watches.get(ino)
|
||||
w.mu.Unlock()
|
||||
if watch == nil {
|
||||
return fmt.Errorf("can't remove non-existent watch for: %s", pathname)
|
||||
}
|
||||
if pathname == dir {
|
||||
w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
|
||||
watch.mask = 0
|
||||
} else {
|
||||
name := filepath.Base(pathname)
|
||||
w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED)
|
||||
delete(watch.names, name)
|
||||
}
|
||||
return w.startRead(watch)
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (w *Watcher) deleteWatch(watch *watch) {
|
||||
for name, mask := range watch.names {
|
||||
if mask&provisional == 0 {
|
||||
w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED)
|
||||
}
|
||||
delete(watch.names, name)
|
||||
}
|
||||
if watch.mask != 0 {
|
||||
if watch.mask&provisional == 0 {
|
||||
w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
|
||||
}
|
||||
watch.mask = 0
|
||||
}
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (w *Watcher) startRead(watch *watch) error {
|
||||
if e := syscall.CancelIo(watch.ino.handle); e != nil {
|
||||
w.Errors <- os.NewSyscallError("CancelIo", e)
|
||||
w.deleteWatch(watch)
|
||||
}
|
||||
mask := toWindowsFlags(watch.mask)
|
||||
for _, m := range watch.names {
|
||||
mask |= toWindowsFlags(m)
|
||||
}
|
||||
if mask == 0 {
|
||||
if e := syscall.CloseHandle(watch.ino.handle); e != nil {
|
||||
w.Errors <- os.NewSyscallError("CloseHandle", e)
|
||||
}
|
||||
w.mu.Lock()
|
||||
delete(w.watches[watch.ino.volume], watch.ino.index)
|
||||
w.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0],
|
||||
uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0)
|
||||
if e != nil {
|
||||
err := os.NewSyscallError("ReadDirectoryChanges", e)
|
||||
if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
|
||||
// Watched directory was probably removed
|
||||
if w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) {
|
||||
if watch.mask&sysFSONESHOT != 0 {
|
||||
watch.mask = 0
|
||||
}
|
||||
}
|
||||
err = nil
|
||||
}
|
||||
w.deleteWatch(watch)
|
||||
w.startRead(watch)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// readEvents reads from the I/O completion port, converts the
|
||||
// received events into Event objects and sends them via the Events channel.
|
||||
// Entry point to the I/O thread.
|
||||
func (w *Watcher) readEvents() {
|
||||
var (
|
||||
n, key uint32
|
||||
ov *syscall.Overlapped
|
||||
)
|
||||
runtime.LockOSThread()
|
||||
|
||||
for {
|
||||
e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE)
|
||||
watch := (*watch)(unsafe.Pointer(ov))
|
||||
|
||||
if watch == nil {
|
||||
select {
|
||||
case ch := <-w.quit:
|
||||
w.mu.Lock()
|
||||
var indexes []indexMap
|
||||
for _, index := range w.watches {
|
||||
indexes = append(indexes, index)
|
||||
}
|
||||
w.mu.Unlock()
|
||||
for _, index := range indexes {
|
||||
for _, watch := range index {
|
||||
w.deleteWatch(watch)
|
||||
w.startRead(watch)
|
||||
}
|
||||
}
|
||||
var err error
|
||||
if e := syscall.CloseHandle(w.port); e != nil {
|
||||
err = os.NewSyscallError("CloseHandle", e)
|
||||
}
|
||||
close(w.Events)
|
||||
close(w.Errors)
|
||||
ch <- err
|
||||
return
|
||||
case in := <-w.input:
|
||||
switch in.op {
|
||||
case opAddWatch:
|
||||
in.reply <- w.addWatch(in.path, uint64(in.flags))
|
||||
case opRemoveWatch:
|
||||
in.reply <- w.remWatch(in.path)
|
||||
}
|
||||
default:
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
switch e {
|
||||
case syscall.ERROR_MORE_DATA:
|
||||
if watch == nil {
|
||||
w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer")
|
||||
} else {
|
||||
// The i/o succeeded but the buffer is full.
|
||||
// In theory we should be building up a full packet.
|
||||
// In practice we can get away with just carrying on.
|
||||
n = uint32(unsafe.Sizeof(watch.buf))
|
||||
}
|
||||
case syscall.ERROR_ACCESS_DENIED:
|
||||
// Watched directory was probably removed
|
||||
w.sendEvent(watch.path, watch.mask&sysFSDELETESELF)
|
||||
w.deleteWatch(watch)
|
||||
w.startRead(watch)
|
||||
continue
|
||||
case syscall.ERROR_OPERATION_ABORTED:
|
||||
// CancelIo was called on this handle
|
||||
continue
|
||||
default:
|
||||
w.Errors <- os.NewSyscallError("GetQueuedCompletionPort", e)
|
||||
continue
|
||||
case nil:
|
||||
}
|
||||
|
||||
var offset uint32
|
||||
for {
|
||||
if n == 0 {
|
||||
w.Events <- newEvent("", sysFSQOVERFLOW)
|
||||
w.Errors <- errors.New("short read in readEvents()")
|
||||
break
|
||||
}
|
||||
|
||||
// Point "raw" to the event in the buffer
|
||||
raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))
|
||||
buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName))
|
||||
name := syscall.UTF16ToString(buf[:raw.FileNameLength/2])
|
||||
fullname := filepath.Join(watch.path, name)
|
||||
|
||||
var mask uint64
|
||||
switch raw.Action {
|
||||
case syscall.FILE_ACTION_REMOVED:
|
||||
mask = sysFSDELETESELF
|
||||
case syscall.FILE_ACTION_MODIFIED:
|
||||
mask = sysFSMODIFY
|
||||
case syscall.FILE_ACTION_RENAMED_OLD_NAME:
|
||||
watch.rename = name
|
||||
case syscall.FILE_ACTION_RENAMED_NEW_NAME:
|
||||
if watch.names[watch.rename] != 0 {
|
||||
watch.names[name] |= watch.names[watch.rename]
|
||||
delete(watch.names, watch.rename)
|
||||
mask = sysFSMOVESELF
|
||||
}
|
||||
}
|
||||
|
||||
sendNameEvent := func() {
|
||||
if w.sendEvent(fullname, watch.names[name]&mask) {
|
||||
if watch.names[name]&sysFSONESHOT != 0 {
|
||||
delete(watch.names, name)
|
||||
}
|
||||
}
|
||||
}
|
||||
if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME {
|
||||
sendNameEvent()
|
||||
}
|
||||
if raw.Action == syscall.FILE_ACTION_REMOVED {
|
||||
w.sendEvent(fullname, watch.names[name]&sysFSIGNORED)
|
||||
delete(watch.names, name)
|
||||
}
|
||||
if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) {
|
||||
if watch.mask&sysFSONESHOT != 0 {
|
||||
watch.mask = 0
|
||||
}
|
||||
}
|
||||
if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME {
|
||||
fullname = filepath.Join(watch.path, watch.rename)
|
||||
sendNameEvent()
|
||||
}
|
||||
|
||||
// Move to the next event in the buffer
|
||||
if raw.NextEntryOffset == 0 {
|
||||
break
|
||||
}
|
||||
offset += raw.NextEntryOffset
|
||||
|
||||
// Error!
|
||||
if offset >= n {
|
||||
w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.")
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if err := w.startRead(watch); err != nil {
|
||||
w.Errors <- err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (w *Watcher) sendEvent(name string, mask uint64) bool {
|
||||
if mask == 0 {
|
||||
return false
|
||||
}
|
||||
event := newEvent(name, uint32(mask))
|
||||
select {
|
||||
case ch := <-w.quit:
|
||||
w.quit <- ch
|
||||
case w.Events <- event:
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func toWindowsFlags(mask uint64) uint32 {
|
||||
var m uint32
|
||||
if mask&sysFSACCESS != 0 {
|
||||
m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS
|
||||
}
|
||||
if mask&sysFSMODIFY != 0 {
|
||||
m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE
|
||||
}
|
||||
if mask&sysFSATTRIB != 0 {
|
||||
m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES
|
||||
}
|
||||
if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 {
|
||||
m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
func toFSnotifyFlags(action uint32) uint64 {
|
||||
switch action {
|
||||
case syscall.FILE_ACTION_ADDED:
|
||||
return sysFSCREATE
|
||||
case syscall.FILE_ACTION_REMOVED:
|
||||
return sysFSDELETE
|
||||
case syscall.FILE_ACTION_MODIFIED:
|
||||
return sysFSMODIFY
|
||||
case syscall.FILE_ACTION_RENAMED_OLD_NAME:
|
||||
return sysFSMOVEDFROM
|
||||
case syscall.FILE_ACTION_RENAMED_NEW_NAME:
|
||||
return sysFSMOVEDTO
|
||||
}
|
||||
return 0
|
||||
}
|
||||
29
vendor/gopkg.in/tomb.v1/LICENSE
generated
vendored
Normal file
29
vendor/gopkg.in/tomb.v1/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,29 @@
|
||||
tomb - support for clean goroutine termination in Go.
|
||||
|
||||
Copyright (c) 2010-2011 - Gustavo Niemeyer <gustavo@niemeyer.net>
|
||||
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright notice,
|
||||
this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
* Neither the name of the copyright holder nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
|
||||
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
||||
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
4
vendor/gopkg.in/tomb.v1/README.md
generated
vendored
Normal file
4
vendor/gopkg.in/tomb.v1/README.md
generated
vendored
Normal file
@@ -0,0 +1,4 @@
|
||||
Installation and usage
|
||||
----------------------
|
||||
|
||||
See [gopkg.in/tomb.v1](https://gopkg.in/tomb.v1) for documentation and usage details.
|
||||
176
vendor/gopkg.in/tomb.v1/tomb.go
generated
vendored
Normal file
176
vendor/gopkg.in/tomb.v1/tomb.go
generated
vendored
Normal file
@@ -0,0 +1,176 @@
|
||||
// Copyright (c) 2011 - Gustavo Niemeyer <gustavo@niemeyer.net>
|
||||
//
|
||||
// All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
// * Neither the name of the copyright holder nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
|
||||
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
||||
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// The tomb package offers a conventional API for clean goroutine termination.
|
||||
//
|
||||
// A Tomb tracks the lifecycle of a goroutine as alive, dying or dead,
|
||||
// and the reason for its death.
|
||||
//
|
||||
// The zero value of a Tomb assumes that a goroutine is about to be
|
||||
// created or already alive. Once Kill or Killf is called with an
|
||||
// argument that informs the reason for death, the goroutine is in
|
||||
// a dying state and is expected to terminate soon. Right before the
|
||||
// goroutine function or method returns, Done must be called to inform
|
||||
// that the goroutine is indeed dead and about to stop running.
|
||||
//
|
||||
// A Tomb exposes Dying and Dead channels. These channels are closed
|
||||
// when the Tomb state changes in the respective way. They enable
|
||||
// explicit blocking until the state changes, and also to selectively
|
||||
// unblock select statements accordingly.
|
||||
//
|
||||
// When the tomb state changes to dying and there's still logic going
|
||||
// on within the goroutine, nested functions and methods may choose to
|
||||
// return ErrDying as their error value, as this error won't alter the
|
||||
// tomb state if provided to the Kill method. This is a convenient way to
|
||||
// follow standard Go practices in the context of a dying tomb.
|
||||
//
|
||||
// For background and a detailed example, see the following blog post:
|
||||
//
|
||||
// http://blog.labix.org/2011/10/09/death-of-goroutines-under-control
|
||||
//
|
||||
// For a more complex code snippet demonstrating the use of multiple
|
||||
// goroutines with a single Tomb, see:
|
||||
//
|
||||
// http://play.golang.org/p/Xh7qWsDPZP
|
||||
//
|
||||
package tomb
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// A Tomb tracks the lifecycle of a goroutine as alive, dying or dead,
|
||||
// and the reason for its death.
|
||||
//
|
||||
// See the package documentation for details.
|
||||
type Tomb struct {
|
||||
m sync.Mutex
|
||||
dying chan struct{}
|
||||
dead chan struct{}
|
||||
reason error
|
||||
}
|
||||
|
||||
var (
|
||||
ErrStillAlive = errors.New("tomb: still alive")
|
||||
ErrDying = errors.New("tomb: dying")
|
||||
)
|
||||
|
||||
func (t *Tomb) init() {
|
||||
t.m.Lock()
|
||||
if t.dead == nil {
|
||||
t.dead = make(chan struct{})
|
||||
t.dying = make(chan struct{})
|
||||
t.reason = ErrStillAlive
|
||||
}
|
||||
t.m.Unlock()
|
||||
}
|
||||
|
||||
// Dead returns the channel that can be used to wait
|
||||
// until t.Done has been called.
|
||||
func (t *Tomb) Dead() <-chan struct{} {
|
||||
t.init()
|
||||
return t.dead
|
||||
}
|
||||
|
||||
// Dying returns the channel that can be used to wait
|
||||
// until t.Kill or t.Done has been called.
|
||||
func (t *Tomb) Dying() <-chan struct{} {
|
||||
t.init()
|
||||
return t.dying
|
||||
}
|
||||
|
||||
// Wait blocks until the goroutine is in a dead state and returns the
|
||||
// reason for its death.
|
||||
func (t *Tomb) Wait() error {
|
||||
t.init()
|
||||
<-t.dead
|
||||
t.m.Lock()
|
||||
reason := t.reason
|
||||
t.m.Unlock()
|
||||
return reason
|
||||
}
|
||||
|
||||
// Done flags the goroutine as dead, and should be called a single time
|
||||
// right before the goroutine function or method returns.
|
||||
// If the goroutine was not already in a dying state before Done is
|
||||
// called, it will be flagged as dying and dead at once with no
|
||||
// error.
|
||||
func (t *Tomb) Done() {
|
||||
t.Kill(nil)
|
||||
close(t.dead)
|
||||
}
|
||||
|
||||
// Kill flags the goroutine as dying for the given reason.
|
||||
// Kill may be called multiple times, but only the first
|
||||
// non-nil error is recorded as the reason for termination.
|
||||
//
|
||||
// If reason is ErrDying, the previous reason isn't replaced
|
||||
// even if it is nil. It's a runtime error to call Kill with
|
||||
// ErrDying if t is not in a dying state.
|
||||
func (t *Tomb) Kill(reason error) {
|
||||
t.init()
|
||||
t.m.Lock()
|
||||
defer t.m.Unlock()
|
||||
if reason == ErrDying {
|
||||
if t.reason == ErrStillAlive {
|
||||
panic("tomb: Kill with ErrDying while still alive")
|
||||
}
|
||||
return
|
||||
}
|
||||
if t.reason == nil || t.reason == ErrStillAlive {
|
||||
t.reason = reason
|
||||
}
|
||||
// If the receive on t.dying succeeds, then
|
||||
// it can only be because we have already closed it.
|
||||
// If it blocks, then we know that it needs to be closed.
|
||||
select {
|
||||
case <-t.dying:
|
||||
default:
|
||||
close(t.dying)
|
||||
}
|
||||
}
|
||||
|
||||
// Killf works like Kill, but builds the reason providing the received
|
||||
// arguments to fmt.Errorf. The generated error is also returned.
|
||||
func (t *Tomb) Killf(f string, a ...interface{}) error {
|
||||
err := fmt.Errorf(f, a...)
|
||||
t.Kill(err)
|
||||
return err
|
||||
}
|
||||
|
||||
// Err returns the reason for the goroutine death provided via Kill
|
||||
// or Killf, or ErrStillAlive when the goroutine is still alive.
|
||||
func (t *Tomb) Err() (reason error) {
|
||||
t.init()
|
||||
t.m.Lock()
|
||||
reason = t.reason
|
||||
t.m.Unlock()
|
||||
return
|
||||
}
|
||||
24
vendor/vendor.json
vendored
24
vendor/vendor.json
vendored
@@ -580,6 +580,18 @@
|
||||
"revision": "badf81fca035b8ebac61b5ab83330b72541056f4",
|
||||
"revisionTime": "2016-06-09T13:59:02Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "0xM336Lb25URO/1W1/CtGoRygVU=",
|
||||
"path": "github.com/hpcloud/tail/util",
|
||||
"revision": "a30252cb686a21eb2d0b98132633053ec2f7f1e5",
|
||||
"revisionTime": "2016-04-28T00:30:50Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "mPYzD3FBUUDZEtVGJpOv1+Uf5ss=",
|
||||
"path": "github.com/hpcloud/tail/watch",
|
||||
"revision": "a30252cb686a21eb2d0b98132633053ec2f7f1e5",
|
||||
"revisionTime": "2016-04-28T00:30:50Z"
|
||||
},
|
||||
{
|
||||
"comment": "0.2.2-2-gc01cf91",
|
||||
"path": "github.com/jmespath/go-jmespath",
|
||||
@@ -752,6 +764,18 @@
|
||||
"path": "golang.org/x/sys/windows",
|
||||
"revision": "b776ec39b3e54652e09028aaaaac9757f4f8211a",
|
||||
"revisionTime": "2016-04-21T02:29:30Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "93uHIq25lffEKY47PV8dBPD+XuQ=",
|
||||
"path": "gopkg.in/fsnotify.v1",
|
||||
"revision": "a8a77c9133d2d6fd8334f3260d06f60e8d80a5fb",
|
||||
"revisionTime": "2016-06-29T01:11:04Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "TO8baX+t1Qs7EmOYth80MkbKzFo=",
|
||||
"path": "gopkg.in/tomb.v1",
|
||||
"revision": "dd632973f1e7218eb1089048e0798ec9ae7dceb8",
|
||||
"revisionTime": "2014-10-24T13:56:13Z"
|
||||
}
|
||||
],
|
||||
"rootPath": "github.com/hashicorp/nomad"
|
||||
|
||||
Reference in New Issue
Block a user