Rework with Go

This commit is contained in:
Dmitrii Andreev
2025-04-07 11:15:05 +03:00
parent 857808895d
commit f64163d12a
698 changed files with 601639 additions and 490 deletions

.dockerignore

@@ -1 +1,3 @@
 .github
+bin
+.idea

.github/workflows/ci.yml vendored Normal file

@@ -0,0 +1,40 @@
name: Go CI

on:
  push:
    branches: [main, master]
  pull_request:
    branches: [main, master]

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version: "1.24" # Match the go directive in go.mod
      - name: Install golangci-lint
        uses: golangci/golangci-lint-action@v6
        with:
          # Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version
          version: latest # Or pin to a specific version like v1.58.1
      - name: Display Go version
        run: go version
      - name: Display golangci-lint version
        run: golangci-lint --version
      - name: Build
        run: make build
      - name: Lint
        run: make lint # Uses .golangci.yml configuration
      # - name: Test # Uncomment when tests are added
      #   run: make test

.gitignore vendored

@@ -1,19 +1,30 @@
 records
+bin
-# Byte-compiled / optimized / DLL files
-__pycache__/
-*.py[cod]
-*$py.class
-# C extensions
+# If you prefer the allow list template instead of the deny list, see community template:
+# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore
+#
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
 *.so
-# Environments
+*.dylib
+# Test binary, built with `go test -c`
+*.test
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+# Dependency directories (remove the comment below to include it)
+# vendor/
+# Go workspace file
+go.work
+go.work.sum
+# env file
 .env
-.venv
-env/
-venv/
-ENV/
-env.bak/
-venv.bak/
 *.swp

Dockerfile

@@ -1,11 +1,46 @@
-FROM python:3.11-slim
+# Stage 1: Build the application
+FROM golang:1.24-alpine AS builder
 WORKDIR /app
+# The sqlite driver (mattn/go-sqlite3) uses cgo, so the builder needs a C toolchain
+RUN apk add --no-cache gcc musl-dev
+# Copy go mod and sum files
+COPY go.mod go.sum ./
+# Download dependencies
+RUN go mod download
+# Copy the source code
 COPY . .
-RUN pip install --no-cache-dir -r requirements.txt
+# Build the application
+# CGO_ENABLED=1 is required because mattn/go-sqlite3 depends on cgo
+# -ldflags="-w -s" strips debug information, reducing binary size
+RUN CGO_ENABLED=1 go build -ldflags="-w -s" -o /icecast-ripper ./cmd/icecast-ripper/main.go
+# Stage 2: Create the final lightweight image
+FROM alpine:latest
+WORKDIR /app
+# Copy the binary from the builder stage
+COPY --from=builder /icecast-ripper /app/icecast-ripper
+# Create directories for recordings, temp files, and database if needed inside the container
+# These should ideally be mounted as volumes in production
+RUN mkdir -p /app/recordings /app/temp
+# Expose the server port (if different from default, adjust accordingly)
 EXPOSE 8080
-CMD [ "python", "./src/main.py" ]
+# Set default environment variables (can be overridden)
+ENV DATABASE_PATH=/app/icecast-ripper.db
+ENV RECORDINGS_PATH=/app/recordings
+ENV TEMP_PATH=/app/temp
+ENV SERVER_ADDRESS=:8080
+ENV LOG_LEVEL=info
+# ENV STREAM_URL= # Required: Must be set at runtime
+# ENV CHECK_INTERVAL=1m # Optional: Defaults to 1m
+# ENV RSS_FEED_URL= # Optional: Defaults to http://<container_ip>:8080/rss
+# Command to run the application
+ENTRYPOINT ["/app/icecast-ripper"]

Makefile

@@ -1,22 +1,55 @@
-run:
-	@echo "Starting Icecast Ripper Service"
-	python src/main.py
-
-test:
-	@echo "Running tests"
-	python -m unittest discover -s tests/
+# Simple Makefile for Go project
+
+# Go parameters
+GOCMD=go
+GOBUILD=$(GOCMD) build
+GOCLEAN=$(GOCMD) clean
+GOTEST=$(GOCMD) test
+GOGET=$(GOCMD) get
+GORUN=$(GOCMD) run
+GOLINT=golangci-lint
+
+# Binary name
+BINARY_NAME=icecast-ripper
+BINARY_PATH=./bin/$(BINARY_NAME)
+
+# Source files entrypoint
+MAIN_GO=./cmd/icecast-ripper/main.go
+
+# Default target
+all: build
+
+# Build the application
 build:
-	@echo "Building Docker image for Icecast Ripper Service"
-	docker build -t icecast-ripper .
+	@echo "Building $(BINARY_NAME)..."
+	$(GOBUILD) -o $(BINARY_PATH) $(MAIN_GO)
+	@echo "$(BINARY_NAME) built successfully."

-docker-run: build
-	@echo "Running Icecast Ripper Service in a Docker container"
-	docker run -p 8080:8080 --env-file .env.example icecast-ripper
+# Run the application
+run: build
+	@echo "Running $(BINARY_NAME)..."
+	$(BINARY_PATH)
+
+# Run linters
+lint:
+	@echo "Running linters..."
+	$(GOLINT) run ./...
+
+# Run tests
+test:
+	@echo "Running tests..."
+	$(GOTEST) -v ./...
+
+# Clean build artifacts
 clean:
-	@echo "Cleaning up __pycache__ and .pyc files"
-	find . -type d -name __pycache__ -exec rm -r {} +
-	find . -type f -name '*.pyc' -delete
+	@echo "Cleaning..."
+	$(GOCLEAN)
+	rm -f $(BINARY_PATH)
+	@echo "Cleaned."
+
-.PHONY: run test build docker-run clean
+# Get dependencies
+deps:
+	@echo "Getting dependencies..."
+	$(GOGET) ./...
+
+.PHONY: all build run lint test clean deps

cmd/icecast-ripper/main.go Normal file

@@ -0,0 +1,103 @@
package main
import (
"context"
"fmt"
"log/slog"
"os"
"os/signal"
"syscall"
"time"
"github.com/kemko/icecast-ripper/internal/config"
"github.com/kemko/icecast-ripper/internal/database"
"github.com/kemko/icecast-ripper/internal/logger"
"github.com/kemko/icecast-ripper/internal/recorder"
"github.com/kemko/icecast-ripper/internal/rss"
"github.com/kemko/icecast-ripper/internal/scheduler"
"github.com/kemko/icecast-ripper/internal/server"
"github.com/kemko/icecast-ripper/internal/streamchecker"
)
func main() {
// Load configuration
cfg, err := config.LoadConfig()
if err != nil {
_, _ = fmt.Fprintf(os.Stderr, "Error loading configuration: %v\n", err)
os.Exit(1)
}
// Setup logger
logger.Setup(cfg.LogLevel)
slog.Info("Starting icecast-ripper", "version", "0.1.0") // Updated version
// Initialize database
db, err := database.InitDB(cfg.DatabasePath)
if err != nil {
slog.Error("Failed to initialize database", "error", err)
os.Exit(1)
}
defer func() {
if err := db.Close(); err != nil {
slog.Error("Failed to close database", "error", err)
}
}()
// Create a context that cancels on shutdown signal
ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
defer stop() // Ensure context is cancelled even if not explicitly stopped later
// --- Initialize components ---
slog.Info("Initializing components...")
streamChecker := streamchecker.New(cfg.StreamURL)
recorderInstance, err := recorder.New(cfg.TempPath, cfg.RecordingsPath, db)
if err != nil {
slog.Error("Failed to initialize recorder", "error", err)
os.Exit(1)
}
rssGenerator := rss.New(db, cfg, "Icecast Recordings", "Recordings from stream: "+cfg.StreamURL)
schedulerInstance := scheduler.New(cfg.CheckInterval, streamChecker, recorderInstance)
httpServer := server.New(cfg, rssGenerator)
// --- Start components ---
slog.Info("Starting services...")
schedulerInstance.Start(ctx) // Pass the main context to the scheduler
if err := httpServer.Start(); err != nil {
slog.Error("Failed to start HTTP server", "error", err)
stop() // Trigger shutdown if server fails to start
os.Exit(1)
}
slog.Info("Application started successfully. Press Ctrl+C to shut down.")
// Wait for context cancellation (due to signal)
<-ctx.Done()
slog.Info("Shutting down application...")
// --- Graceful shutdown ---
// Create a shutdown context with a timeout
shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), 30*time.Second)
defer shutdownCancel()
// Stop scheduler first (prevents new recordings)
schedulerInstance.Stop() // This waits for the scheduler loop to exit
// Stop the recorder (ensures any ongoing recording is finalized or cancelled)
// Note: Stopping the scheduler doesn't automatically stop an *ongoing* recording.
// We need to explicitly stop the recorder if we want it to terminate immediately.
recorderInstance.StopRecording() // Signal any active recording to stop
// Stop the HTTP server
if err := httpServer.Stop(shutdownCtx); err != nil {
slog.Warn("HTTP server shutdown error", "error", err)
}
// Database is closed by the deferred function call
slog.Info("Application shut down gracefully")
}

go.mod Normal file

@@ -0,0 +1,24 @@
module github.com/kemko/icecast-ripper
go 1.24.2
require (
github.com/mattn/go-sqlite3 v1.14.27
github.com/spf13/viper v1.20.1
)
require (
github.com/fsnotify/fsnotify v1.9.0 // indirect
github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
github.com/pelletier/go-toml/v2 v2.2.3 // indirect
github.com/sagikazarmark/locafero v0.9.0 // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
github.com/spf13/afero v1.14.0 // indirect
github.com/spf13/cast v1.7.1 // indirect
github.com/spf13/pflag v1.0.6 // indirect
github.com/subosito/gotenv v1.6.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/sys v0.32.0 // indirect
golang.org/x/text v0.24.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

go.sum Normal file

@@ -0,0 +1,49 @@
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss=
github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/mattn/go-sqlite3 v1.14.27 h1:drZCnuvf37yPfs95E5jd9s3XhdVWLal+6BOK6qrv6IU=
github.com/mattn/go-sqlite3 v1.14.27/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M=
github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/sagikazarmark/locafero v0.9.0 h1:GbgQGNtTrEmddYDSAH9QLRyfAHY12md+8YFTqyMTC9k=
github.com/sagikazarmark/locafero v0.9.0/go.mod h1:UBUyz37V+EdMS3hDF3QWIiVr/2dPrx49OMO0Bn0hJqk=
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
github.com/spf13/afero v1.14.0 h1:9tH6MapGnn/j0eb0yIXiLjERO8RB6xIVZRDCX7PtqWA=
github.com/spf13/afero v1.14.0/go.mod h1:acJQ8t0ohCGuMN3O+Pv0V0hgMxNYDlvdk+VTfyZmbYo=
github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y=
github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4=
github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20=
golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0=
golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

internal/config/config.go Normal file

@@ -0,0 +1,77 @@
package config
import (
"errors"
"strings"
"time"

"github.com/spf13/viper"
)
// Config stores all configuration for the application.
// The values are read by viper from environment variables or a config file.
type Config struct {
StreamURL string `mapstructure:"STREAM_URL"`
CheckInterval time.Duration `mapstructure:"CHECK_INTERVAL"`
RecordingsPath string `mapstructure:"RECORDINGS_PATH"`
TempPath string `mapstructure:"TEMP_PATH"`
DatabasePath string `mapstructure:"DATABASE_PATH"`
ServerAddress string `mapstructure:"SERVER_ADDRESS"`
RSSFeedURL string `mapstructure:"RSS_FEED_URL"` // Base URL for the RSS feed links
LogLevel string `mapstructure:"LOG_LEVEL"`
}
// LoadConfig reads configuration from environment variables.
func LoadConfig() (*Config, error) {
viper.AutomaticEnv()
viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
// Set default values
viper.SetDefault("STREAM_URL", "")
viper.SetDefault("CHECK_INTERVAL", "1m")
viper.SetDefault("RECORDINGS_PATH", "./recordings")
viper.SetDefault("TEMP_PATH", "./temp")
viper.SetDefault("DATABASE_PATH", "./icecast-ripper.db")
viper.SetDefault("SERVER_ADDRESS", ":8080")
viper.SetDefault("RSS_FEED_URL", "http://localhost:8080/rss") // Example default
viper.SetDefault("LOG_LEVEL", "info")
var config Config
// Bind environment variables to struct fields
for _, key := range []string{
"STREAM_URL", "CHECK_INTERVAL", "RECORDINGS_PATH", "TEMP_PATH",
"DATABASE_PATH", "SERVER_ADDRESS", "RSS_FEED_URL", "LOG_LEVEL",
} {
if err := viper.BindEnv(key); err != nil {
return nil, err
}
}
// Unmarshal the config
if err := viper.Unmarshal(&config); err != nil {
return nil, err
}
// Ensure required fields are set
if config.StreamURL == "" {
return nil, errors.New("STREAM_URL environment variable is required")
}
return &config, nil
}
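
Since viper.Unmarshal applies its default string-to-time.Duration decode hook, values like CHECK_INTERVAL=30s land directly in the CheckInterval field. A minimal sketch of a test exercising that path (a hypothetical internal/config/config_test.go, not part of this commit):

package config_test

import (
	"testing"
	"time"

	"github.com/kemko/icecast-ripper/internal/config"
)

func TestLoadConfigFromEnv(t *testing.T) {
	t.Setenv("STREAM_URL", "http://example.com/stream") // required field
	t.Setenv("CHECK_INTERVAL", "30s")

	cfg, err := config.LoadConfig()
	if err != nil {
		t.Fatalf("LoadConfig failed: %v", err)
	}
	if cfg.CheckInterval != 30*time.Second {
		t.Errorf("CheckInterval = %v, want 30s", cfg.CheckInterval)
	}
}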

internal/database/database.go Normal file

@@ -0,0 +1,114 @@
package database
import (
"database/sql"
"fmt"
"log/slog"
"time"
_ "github.com/mattn/go-sqlite3" // SQLite driver
)
// DB represents the database connection.
type DB struct {
*sql.DB
}
// RecordedFile represents a row in the recorded_files table.
type RecordedFile struct {
ID int64
Filename string
Hash string
FileSize int64 // Size in bytes
Duration time.Duration // Duration of the recording
RecordedAt time.Time
}
// InitDB initializes the SQLite database connection and creates tables if they don't exist.
func InitDB(dataSourceName string) (*DB, error) {
slog.Info("Initializing database connection", "path", dataSourceName)
db, err := sql.Open("sqlite3", dataSourceName)
if err != nil {
return nil, fmt.Errorf("failed to open database: %w", err)
}
if err = db.Ping(); err != nil {
return nil, fmt.Errorf("failed to connect to database: %w", err)
}
// Create table if not exists
query := `
CREATE TABLE IF NOT EXISTS recorded_files (
id INTEGER PRIMARY KEY AUTOINCREMENT,
filename TEXT NOT NULL UNIQUE,
hash TEXT NOT NULL UNIQUE,
filesize INTEGER NOT NULL,
duration INTEGER NOT NULL, -- Store duration in seconds or milliseconds
recorded_at DATETIME NOT NULL
);
CREATE INDEX IF NOT EXISTS idx_recorded_at ON recorded_files (recorded_at);
`
if _, err = db.Exec(query); err != nil {
return nil, fmt.Errorf("failed to create table: %w", err)
}
slog.Info("Database initialized successfully")
return &DB{db}, nil
}
// AddRecordedFile inserts a new record into the database.
func (db *DB) AddRecordedFile(filename, hash string, fileSize int64, duration time.Duration, recordedAt time.Time) (int64, error) {
result, err := db.Exec(
"INSERT INTO recorded_files (filename, hash, filesize, duration, recorded_at) VALUES (?, ?, ?, ?, ?)",
filename, hash, fileSize, int64(duration.Seconds()), recordedAt, // Store duration as seconds
)
if err != nil {
return 0, fmt.Errorf("failed to insert recorded file: %w", err)
}
id, err := result.LastInsertId()
if err != nil {
return 0, fmt.Errorf("failed to get last insert ID: %w", err)
}
slog.Debug("Added recorded file to database", "id", id, "filename", filename, "hash", hash)
return id, nil
}
// GetRecordedFiles retrieves all recorded files, ordered by recording date descending.
func (db *DB) GetRecordedFiles(limit int) ([]RecordedFile, error) {
query := "SELECT id, filename, hash, filesize, duration, recorded_at FROM recorded_files ORDER BY recorded_at DESC"
args := []interface{}{}
if limit > 0 {
query += " LIMIT ?"
args = append(args, limit)
}
rows, err := db.Query(query, args...)
if err != nil {
return nil, fmt.Errorf("failed to query recorded files: %w", err)
}
defer rows.Close()
var files []RecordedFile
for rows.Next() {
var rf RecordedFile
var durationSeconds int64
if err := rows.Scan(&rf.ID, &rf.Filename, &rf.Hash, &rf.FileSize, &durationSeconds, &rf.RecordedAt); err != nil {
return nil, fmt.Errorf("failed to scan recorded file row: %w", err)
}
rf.Duration = time.Duration(durationSeconds) * time.Second
files = append(files, rf)
}
if err = rows.Err(); err != nil {
return nil, fmt.Errorf("error iterating over recorded file rows: %w", err)
}
return files, nil
}
// Close closes the database connection.
func (db *DB) Close() error {
slog.Info("Closing database connection")
return db.DB.Close()
}
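
Because AddRecordedFile persists duration as whole seconds (int64(duration.Seconds())), sub-second precision is dropped on the round trip through GetRecordedFiles. A small illustrative sketch (hypothetical usage and paths, assuming the package above):

package main

import (
	"fmt"
	"time"

	"github.com/kemko/icecast-ripper/internal/database"
)

func main() {
	db, err := database.InitDB("./demo.db") // hypothetical path
	if err != nil {
		panic(err)
	}
	defer func() { _ = db.Close() }()

	// 90.7s goes in; 90s (1m30s) comes back out.
	if _, err := db.AddRecordedFile("demo.mp3", "deadbeef", 1024, 90700*time.Millisecond, time.Now()); err != nil {
		panic(err)
	}
	files, err := db.GetRecordedFiles(1)
	if err != nil {
		panic(err)
	}
	fmt.Println(files[0].Duration) // 1m30s
}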

internal/hash/hash.go Normal file

@@ -0,0 +1,55 @@
package hash
import (
"crypto/sha256"
"encoding/hex"
"fmt"
"io"
"log/slog"
"os"
"path/filepath"
)
const (
// hashChunkSize defines how many bytes of the file to read for hashing.
// 1MB should be sufficient for uniqueness in most cases while being fast.
hashChunkSize = 1 * 1024 * 1024 // 1 MB
)
// GenerateFileHash generates a SHA256 hash based on the filename and the first `hashChunkSize` bytes of the file content.
// This avoids reading the entire file for large recordings.
func GenerateFileHash(filePath string) (string, error) {
filename := filepath.Base(filePath)
slog.Debug("Generating hash", "file", filePath, "chunkSize", hashChunkSize)
file, err := os.Open(filePath)
if err != nil {
return "", fmt.Errorf("failed to open file %s for hashing: %w", filePath, err)
}
defer file.Close()
hasher := sha256.New()
// Include filename in the hash
if _, err := hasher.Write([]byte(filename)); err != nil {
return "", fmt.Errorf("failed to write filename to hasher: %w", err)
}
// Read only the first part of the file
chunk := make([]byte, hashChunkSize)
bytesRead, err := file.Read(chunk)
if err != nil && err != io.EOF {
return "", fmt.Errorf("failed to read file chunk %s: %w", filePath, err)
}
// Hash the chunk that was read
if _, err := hasher.Write(chunk[:bytesRead]); err != nil {
return "", fmt.Errorf("failed to write file chunk to hasher: %w", err)
}
hashBytes := hasher.Sum(nil)
hashString := hex.EncodeToString(hashBytes)
slog.Debug("Generated hash", "file", filePath, "hash", hashString)
return hashString, nil
}
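
Because the filename is mixed into the digest, byte-identical files under different names still hash differently, which keeps the UNIQUE constraint on hash in the database meaningful for recordings that happen to share content. A quick illustrative sketch (hypothetical usage):

package main

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/kemko/icecast-ripper/internal/hash"
)

func main() {
	dir := os.TempDir()
	for _, name := range []string{"a.mp3", "b.mp3"} {
		path := filepath.Join(dir, name)
		if err := os.WriteFile(path, []byte("same bytes"), 0o644); err != nil {
			panic(err)
		}
		h, err := hash.GenerateFileHash(path)
		if err != nil {
			panic(err)
		}
		fmt.Println(name, h[:12]) // different digests despite identical content
	}
}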

internal/logger/logger.go Normal file

@@ -0,0 +1,30 @@
package logger
import (
"log/slog"
"os"
"strings"
)
// Setup initializes the structured logger.
func Setup(logLevel string) {
var level slog.Level
switch strings.ToLower(logLevel) {
case "debug":
level = slog.LevelDebug
case "warn":
level = slog.LevelWarn
case "error":
level = slog.LevelError
default:
level = slog.LevelInfo
}
opts := &slog.HandlerOptions{
Level: level,
}
handler := slog.NewJSONHandler(os.Stdout, opts) // Or slog.NewTextHandler
slog.SetDefault(slog.New(handler))
slog.Info("Logger initialized", "level", level.String())
}

internal/recorder/recorder.go Normal file

@@ -0,0 +1,256 @@
package recorder
import (
"context"
"fmt"
"io"
"log/slog"
"net/http"
"os"
"path/filepath"
"strings"
"sync"
"time"
"github.com/kemko/icecast-ripper/internal/database"
"github.com/kemko/icecast-ripper/internal/hash"
)
// Recorder handles downloading the stream and saving recordings.
type Recorder struct {
tempPath string
recordingsPath string
db *database.DB
client *http.Client
mu sync.Mutex // Protects access to isRecording
isRecording bool
cancelFunc context.CancelFunc // To stop the current recording
}
// New creates a new Recorder instance.
func New(tempPath, recordingsPath string, db *database.DB) (*Recorder, error) {
// Ensure temp and recordings directories exist
if err := os.MkdirAll(tempPath, 0755); err != nil {
return nil, fmt.Errorf("failed to create temp directory %s: %w", tempPath, err)
}
if err := os.MkdirAll(recordingsPath, 0755); err != nil {
return nil, fmt.Errorf("failed to create recordings directory %s: %w", recordingsPath, err)
}
return &Recorder{
tempPath: tempPath,
recordingsPath: recordingsPath,
db: db,
client: &http.Client{
// Use a longer timeout for downloading the stream
Timeout: 0, // No timeout for the download itself, rely on context cancellation
},
}, nil
}
// IsRecording returns true if a recording is currently in progress.
func (r *Recorder) IsRecording() bool {
r.mu.Lock()
defer r.mu.Unlock()
return r.isRecording
}
// StartRecording starts downloading the stream to a temporary file.
// It runs asynchronously and returns immediately.
func (r *Recorder) StartRecording(ctx context.Context, streamURL string) error {
r.mu.Lock()
if r.isRecording {
r.mu.Unlock()
return fmt.Errorf("recording already in progress")
}
// Create a new context that can be cancelled to stop this specific recording
recordingCtx, cancel := context.WithCancel(ctx)
r.cancelFunc = cancel
r.isRecording = true
r.mu.Unlock()
slog.Info("Starting recording", "url", streamURL)
go r.recordStream(recordingCtx, streamURL) // Run in a goroutine
return nil
}
// StopRecording stops the current recording if one is in progress.
func (r *Recorder) StopRecording() {
r.mu.Lock()
defer r.mu.Unlock()
if r.isRecording && r.cancelFunc != nil {
slog.Info("Stopping current recording")
r.cancelFunc() // Signal the recording goroutine to stop
// The recording goroutine's deferred cleanup resets isRecording once it exits.
}
}
// recordStream performs the actual download and processing.
func (r *Recorder) recordStream(ctx context.Context, streamURL string) {
startTime := time.Now()
var tempFilePath string
// Defer setting isRecording to false and cleaning up cancelFunc
defer func() {
r.mu.Lock()
r.isRecording = false
r.cancelFunc = nil
r.mu.Unlock()
slog.Info("Recording process finished")
// Clean up temp file if it still exists (e.g., due to early error)
if tempFilePath != "" {
if _, err := os.Stat(tempFilePath); err == nil {
slog.Warn("Removing leftover temporary file", "path", tempFilePath)
os.Remove(tempFilePath)
}
}
}()
// Create temporary file
tempFile, err := os.CreateTemp(r.tempPath, "recording-*.tmp")
if err != nil {
slog.Error("Failed to create temporary file", "error", err)
return
}
tempFilePath = tempFile.Name()
slog.Debug("Created temporary file", "path", tempFilePath)
// Download stream
bytesWritten, err := r.downloadStream(ctx, streamURL, tempFile)
// Close the file explicitly *before* hashing and moving
if closeErr := tempFile.Close(); closeErr != nil {
slog.Error("Failed to close temporary file", "path", tempFilePath, "error", closeErr)
// If download also failed, prioritize that error
if err == nil {
err = closeErr // Report closing error if download was ok
}
}
if err != nil {
// Check if the error was due to context cancellation (graceful stop)
if ctx.Err() == context.Canceled {
slog.Info("Recording stopped via cancellation.")
// Proceed to process the partially downloaded file
} else {
slog.Error("Failed to download stream", "error", err)
// No need to keep the temp file if download failed unexpectedly
os.Remove(tempFilePath)
tempFilePath = "" // Prevent deferred cleanup from trying again
return
}
}
// If no bytes were written (e.g., stream closed immediately or cancelled before data), discard.
if bytesWritten == 0 {
slog.Warn("No data written to temporary file, discarding.", "path", tempFilePath)
os.Remove(tempFilePath)
tempFilePath = "" // Prevent deferred cleanup
return
}
endTime := time.Now()
duration := endTime.Sub(startTime)
// Generate hash
fileHash, err := hash.GenerateFileHash(tempFilePath)
if err != nil {
slog.Error("Failed to generate file hash", "path", tempFilePath, "error", err)
os.Remove(tempFilePath) // Clean up if hashing fails
tempFilePath = "" // Prevent deferred cleanup
return
}
// Generate final filename (e.g., based on timestamp)
finalFilename := fmt.Sprintf("recording_%s.mp3", startTime.Format("20060102_150405")) // Assuming mp3, adjust if needed
finalFilename = sanitizeFilename(finalFilename) // Ensure filename is valid
finalPath := filepath.Join(r.recordingsPath, finalFilename)
// Move temporary file to final location
if err := os.Rename(tempFilePath, finalPath); err != nil {
slog.Error("Failed to move temporary file to final location", "temp", tempFilePath, "final", finalPath, "error", err)
os.Remove(tempFilePath) // Attempt cleanup of temp file
tempFilePath = "" // Prevent deferred cleanup
return
}
tempFilePath = "" // File moved, clear path to prevent deferred cleanup
slog.Info("Recording saved", "path", finalPath, "size", bytesWritten, "duration", duration)
// Add record to database
_, err = r.db.AddRecordedFile(finalFilename, fileHash, bytesWritten, duration, startTime)
if err != nil {
slog.Error("Failed to add recording to database", "filename", finalFilename, "hash", fileHash, "error", err)
// Consider how to handle this - maybe retry later? For now, just log.
}
}
// downloadStream handles the HTTP GET request and writes the body to the writer.
func (r *Recorder) downloadStream(ctx context.Context, streamURL string, writer io.Writer) (int64, error) {
req, err := http.NewRequestWithContext(ctx, "GET", streamURL, nil)
if err != nil {
return 0, fmt.Errorf("failed to create request: %w", err)
}
req.Header.Set("User-Agent", "icecast-ripper/1.0")
resp, err := r.client.Do(req)
if err != nil {
// Check if context was cancelled
if ctx.Err() == context.Canceled {
return 0, ctx.Err()
}
return 0, fmt.Errorf("failed to connect to stream: %w", err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return 0, fmt.Errorf("unexpected status code: %s", resp.Status)
}
slog.Debug("Connected to stream, starting download", "url", streamURL)
// Copy the response body to the writer (temp file)
// io.Copy will block until EOF or an error (including context cancellation via the request)
bytesWritten, err := io.Copy(writer, resp.Body)
if err != nil {
// Check if the error is due to context cancellation
if ctx.Err() == context.Canceled {
slog.Info("Stream download cancelled.")
return bytesWritten, ctx.Err() // Return context error
}
// Check for common stream disconnection errors (might need refinement)
if err == io.ErrUnexpectedEOF || strings.Contains(err.Error(), "connection reset by peer") || strings.Contains(err.Error(), "broken pipe") {
slog.Info("Stream disconnected gracefully (EOF or reset)", "bytes_written", bytesWritten)
return bytesWritten, nil // Treat as normal end of stream
}
return bytesWritten, fmt.Errorf("failed during stream copy: %w", err)
}
slog.Info("Stream download finished (EOF)", "bytes_written", bytesWritten)
return bytesWritten, nil
}
// sanitizeFilename removes potentially problematic characters from filenames.
func sanitizeFilename(filename string) string {
// Replace common problematic characters
replacer := strings.NewReplacer(
" ", "_",
"/", "-",
"\\", "-",
":", "-",
"*", "-",
"?", "-",
"\"", "'",
"<", "-",
">", "-",
"|", "-",
)
cleaned := replacer.Replace(filename)
// Trim leading/trailing underscores/hyphens/dots
cleaned = strings.Trim(cleaned, "_-. ")
// Limit length if necessary (though less common on modern filesystems)
// const maxLen = 200
// if len(cleaned) > maxLen {
// cleaned = cleaned[:maxLen]
// }
return cleaned
}

internal/rss/rss.go Normal file

@@ -0,0 +1,142 @@
package rss
import (
"encoding/xml"
"fmt"
"log/slog"
"net/url"
"time"
"github.com/kemko/icecast-ripper/internal/config"
"github.com/kemko/icecast-ripper/internal/database"
)
// Structs for RSS 2.0 feed generation
// Based on https://validator.w3.org/feed/docs/rss2.html
type RSS struct {
XMLName xml.Name `xml:"rss"`
Version string `xml:"version,attr"`
Channel Channel `xml:"channel"`
}
type Channel struct {
XMLName xml.Name `xml:"channel"`
Title string `xml:"title"`
Link string `xml:"link"` // Link to the website/source
Description string `xml:"description"`
LastBuildDate string `xml:"lastBuildDate,omitempty"` // RFC1123Z format
Items []Item `xml:"item"`
}
type Item struct {
XMLName xml.Name `xml:"item"`
Title string `xml:"title"`
Link string `xml:"link"` // Link to the specific recording file
Description string `xml:"description"` // Can include duration, size etc.
PubDate string `xml:"pubDate"` // RFC1123Z format of recording time
GUID GUID `xml:"guid"` // Unique identifier (using file hash)
Enclosure Enclosure `xml:"enclosure"` // Describes the media file
}
// GUID needs IsPermaLink attribute
type GUID struct {
XMLName xml.Name `xml:"guid"`
IsPermaLink bool `xml:"isPermaLink,attr"`
Value string `xml:",chardata"`
}
// Enclosure describes the media file
type Enclosure struct {
XMLName xml.Name `xml:"enclosure"`
URL string `xml:"url,attr"`
Length int64 `xml:"length,attr"`
Type string `xml:"type,attr"` // MIME type (e.g., "audio/mpeg")
}
// Generator creates RSS feeds.
type Generator struct {
db *database.DB
feedBaseURL string // Base URL for links in the feed (e.g., http://server.com/recordings/)
recordingsPath string // Local path to recordings (needed for file info, maybe not directly used in feed)
feedTitle string
feedDesc string
}
// New creates a new RSS Generator instance.
func New(db *database.DB, cfg *config.Config, title, description string) *Generator {
// Ensure the base URL for recordings ends with a slash
baseURL := cfg.RSSFeedURL // This should be the URL base for *serving* files
if baseURL == "" {
slog.Warn("RSS_FEED_URL not set, RSS links might be incomplete. Using placeholder.")
baseURL = "http://localhost:8080/recordings/" // Placeholder
}
if baseURL[len(baseURL)-1:] != "/" {
baseURL += "/"
}
return &Generator{
db: db,
feedBaseURL: baseURL,
recordingsPath: cfg.RecordingsPath,
feedTitle: title,
feedDesc: description,
}
}
// GenerateFeed fetches recordings and produces the RSS feed XML as a byte slice.
func (g *Generator) GenerateFeed(maxItems int) ([]byte, error) {
slog.Debug("Generating RSS feed", "maxItems", maxItems)
recordings, err := g.db.GetRecordedFiles(maxItems)
if err != nil {
return nil, fmt.Errorf("failed to get recorded files for RSS feed: %w", err)
}
items := make([]Item, 0, len(recordings))
for _, rec := range recordings {
fileURL, err := url.JoinPath(g.feedBaseURL, rec.Filename)
if err != nil {
slog.Error("Failed to create file URL for RSS item", "filename", rec.Filename, "error", err)
continue // Skip this item if URL creation fails
}
item := Item{
Title: fmt.Sprintf("Recording %s", rec.RecordedAt.Format("2006-01-02 15:04")), // Example title
Link: fileURL,
Description: fmt.Sprintf("Icecast stream recording from %s. Duration: %s.", rec.RecordedAt.Format(time.RFC1123), rec.Duration.String()),
PubDate: rec.RecordedAt.Format(time.RFC1123Z), // Use RFC1123Z for pubDate
GUID: GUID{
IsPermaLink: false, // The hash itself is not a permalink URL
Value: rec.Hash,
},
Enclosure: Enclosure{
URL: fileURL,
Length: rec.FileSize,
Type: "audio/mpeg", // Assuming MP3, adjust if format varies or is detectable
},
}
items = append(items, item)
}
feed := RSS{
Version: "2.0",
Channel: Channel{
Title: g.feedTitle,
Link: g.feedBaseURL, // Link to the base URL or a relevant page
Description: g.feedDesc,
LastBuildDate: time.Now().Format(time.RFC1123Z),
Items: items,
},
}
xmlBytes, err := xml.MarshalIndent(feed, "", " ")
if err != nil {
return nil, fmt.Errorf("failed to marshal RSS feed to XML: %w", err)
}
// Add the standard XML header
output := append([]byte(xml.Header), xmlBytes...)
slog.Debug("RSS feed generated successfully", "item_count", len(items))
return output, nil
}
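
For reference, a trimmed sketch of what one marshaled item looks like with struct tags shaped like the ones above (illustrative only; the values are made up):

package main

import (
	"encoding/xml"
	"fmt"
)

type guid struct {
	IsPermaLink bool   `xml:"isPermaLink,attr"`
	Value       string `xml:",chardata"`
}

type enclosure struct {
	URL    string `xml:"url,attr"`
	Length int64  `xml:"length,attr"`
	Type   string `xml:"type,attr"`
}

type item struct {
	XMLName   xml.Name  `xml:"item"`
	Title     string    `xml:"title"`
	GUID      guid      `xml:"guid"`
	Enclosure enclosure `xml:"enclosure"`
}

func main() {
	out, _ := xml.MarshalIndent(item{
		Title:     "Recording 2025-04-07 11:15",
		GUID:      guid{IsPermaLink: false, Value: "abc123"},
		Enclosure: enclosure{URL: "http://localhost:8080/recordings/x.mp3", Length: 42, Type: "audio/mpeg"},
	}, "", "  ")
	fmt.Println(string(out))
	// <item>
	//   <title>Recording 2025-04-07 11:15</title>
	//   <guid isPermaLink="false">abc123</guid>
	//   <enclosure url="http://localhost:8080/recordings/x.mp3" length="42" type="audio/mpeg"></enclosure>
	// </item>
}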

internal/scheduler/scheduler.go Normal file

@@ -0,0 +1,112 @@
package scheduler
import (
"context"
"log/slog"
"sync"
"time"
"github.com/kemko/icecast-ripper/internal/recorder"
"github.com/kemko/icecast-ripper/internal/streamchecker"
)
// Scheduler manages the periodic checking of the stream and triggers recordings.
type Scheduler struct {
interval time.Duration
checker *streamchecker.Checker
recorder *recorder.Recorder
stopChan chan struct{} // Channel to signal stopping the scheduler
stopOnce sync.Once // Ensures Stop is only called once
wg sync.WaitGroup // Waits for the run loop to finish
parentContext context.Context // Base context for recordings
}
// New creates a new Scheduler instance.
func New(interval time.Duration, checker *streamchecker.Checker, recorder *recorder.Recorder) *Scheduler {
return &Scheduler{
interval: interval,
checker: checker,
recorder: recorder,
stopChan: make(chan struct{}),
}
}
// Start begins the scheduling loop.
// It takes a parent context which will be used for recordings.
func (s *Scheduler) Start(ctx context.Context) {
slog.Info("Starting scheduler", "interval", s.interval.String())
s.parentContext = ctx
s.wg.Add(1)
go s.run()
}
// Stop signals the scheduling loop to terminate gracefully.
func (s *Scheduler) Stop() {
s.stopOnce.Do(func() {
slog.Info("Stopping scheduler...")
close(s.stopChan) // Signal the run loop to stop
// If a recording is in progress, stopping the scheduler doesn't automatically stop it.
// The recorder needs to be stopped separately if immediate termination is desired.
// s.recorder.StopRecording() // Uncomment if scheduler stop should also stop recording
s.wg.Wait() // Wait for the run loop to exit
slog.Info("Scheduler stopped.")
})
}
// run is the main loop for the scheduler.
func (s *Scheduler) run() {
defer s.wg.Done()
ticker := time.NewTicker(s.interval)
defer ticker.Stop()
// Check once immediately on start, then on every tick.
s.checkAndRecord()
for {
select {
case <-ticker.C:
s.checkAndRecord()
case <-s.stopChan:
slog.Info("Scheduler run loop exiting.")
return // Exit loop if stop signal received
case <-s.parentContext.Done():
slog.Info("Scheduler parent context cancelled, stopping.")
// Do not call s.Stop() here: Stop waits on wg and would deadlock
// when invoked from this goroutine. A later Stop() from the owner
// closes stopChan and returns once this loop has exited.
return
}
}
}
// checkAndRecord performs the stream check and starts recording if necessary.
func (s *Scheduler) checkAndRecord() {
// Do not check if a recording is already in progress
if s.recorder.IsRecording() {
slog.Debug("Skipping check, recording in progress.")
return
}
slog.Debug("Scheduler checking stream status...")
isLive, err := s.checker.IsLive()
if err != nil {
// Log the error but continue; maybe a temporary network issue
slog.Warn("Error checking stream status", "error", err)
return
}
if isLive {
slog.Info("Stream is live, attempting to start recording.")
// Use the parent context provided during Start for the recording
err := s.recorder.StartRecording(s.parentContext, s.checker.GetStreamURL())
if err != nil {
// This likely means a recording was *just* started by another check, which is fine.
slog.Warn("Failed to start recording (might already be in progress)", "error", err)
} else {
slog.Info("Recording started successfully by scheduler.")
// Recording started, the check loop will skip until it finishes
}
} else {
slog.Debug("Stream is not live, waiting for next interval.")
}
}

internal/server/server.go Normal file

@@ -0,0 +1,109 @@
package server
import (
"context"
"errors"
"log/slog"
"net/http"
"path/filepath"
"time"
"github.com/kemko/icecast-ripper/internal/config"
"github.com/kemko/icecast-ripper/internal/rss"
)
// Server handles HTTP requests for the RSS feed and recordings.
type Server struct {
server *http.Server
rssGenerator *rss.Generator
recordingsPath string
serverAddress string
}
// New creates a new HTTP server instance.
func New(cfg *config.Config, rssGenerator *rss.Generator) *Server {
mux := http.NewServeMux()
s := &Server{
rssGenerator: rssGenerator,
recordingsPath: cfg.RecordingsPath,
serverAddress: cfg.ServerAddress,
}
// Handler for the RSS feed
mux.HandleFunc("GET /rss", s.handleRSS)
// Handler for serving static recording files
// Ensure recordingsPath is absolute or relative to the working directory
absRecordingsPath, err := filepath.Abs(cfg.RecordingsPath)
if err != nil {
slog.Error("Failed to get absolute path for recordings directory", "path", cfg.RecordingsPath, "error", err)
// Decide how to handle this - maybe panic or return an error from New?
// For now, log and continue, file serving might fail.
absRecordingsPath = cfg.RecordingsPath // Fallback to original path
}
slog.Info("Serving recordings from", "path", absRecordingsPath)
fileServer := http.FileServer(http.Dir(absRecordingsPath))
// The path must end with a trailing slash for FileServer to work correctly with subdirectories
mux.Handle("GET /recordings/", http.StripPrefix("/recordings/", fileServer))
s.server = &http.Server{
Addr: cfg.ServerAddress,
Handler: mux,
ReadTimeout: 10 * time.Second,
WriteTimeout: 10 * time.Second,
IdleTimeout: 60 * time.Second,
}
return s
}
// handleRSS generates and serves the RSS feed.
func (s *Server) handleRSS(w http.ResponseWriter, r *http.Request) {
slog.Debug("Received request for RSS feed")
// TODO: Consider adding caching headers (ETag, Last-Modified)
// based on the latest recording timestamp or a hash of the feed content.
// Generate feed (limit to a reasonable number, e.g., 50 latest items)
maxItems := 50
feedBytes, err := s.rssGenerator.GenerateFeed(maxItems)
if err != nil {
slog.Error("Failed to generate RSS feed", "error", err)
http.Error(w, "Internal Server Error", http.StatusInternalServerError)
return
}
w.Header().Set("Content-Type", "application/rss+xml; charset=utf-8")
w.WriteHeader(http.StatusOK)
_, err = w.Write(feedBytes)
if err != nil {
slog.Error("Failed to write RSS feed response", "error", err)
}
}
// Start begins listening for HTTP requests.
func (s *Server) Start() error {
slog.Info("Starting HTTP server", "address", s.serverAddress)
// Run the server in a separate goroutine so it doesn't block.
go func() {
if err := s.server.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) {
slog.Error("HTTP server ListenAndServe error", "error", err)
// Consider signaling failure to the main application thread if needed
}
}()
return nil // Return immediately
}
// Stop gracefully shuts down the HTTP server.
func (s *Server) Stop(ctx context.Context) error {
slog.Info("Stopping HTTP server...")
shutdownCtx, cancel := context.WithTimeout(ctx, 15*time.Second) // Give 15 seconds to shutdown gracefully
defer cancel()
if err := s.server.Shutdown(shutdownCtx); err != nil {
slog.Error("HTTP server graceful shutdown failed", "error", err)
return err
}
slog.Info("HTTP server stopped.")
return nil
}
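
One way to approach the caching-headers TODO in handleRSS: derive a weak ETag from the generated feed bytes and short-circuit on If-None-Match. A sketch under that assumption (the package and helper names are hypothetical):

package feedcache

import (
	"crypto/sha256"
	"encoding/hex"
	"net/http"
)

// etagFor derives a weak validator from the response body.
func etagFor(body []byte) string {
	sum := sha256.Sum256(body)
	return `W/"` + hex.EncodeToString(sum[:8]) + `"`
}

// WriteWithETag replies 304 Not Modified if the client already holds the current body.
func WriteWithETag(w http.ResponseWriter, r *http.Request, contentType string, body []byte) {
	etag := etagFor(body)
	w.Header().Set("ETag", etag)
	if r.Header.Get("If-None-Match") == etag {
		w.WriteHeader(http.StatusNotModified)
		return
	}
	w.Header().Set("Content-Type", contentType)
	_, _ = w.Write(body)
}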

internal/streamchecker/streamchecker.go Normal file

@@ -0,0 +1,62 @@
package streamchecker
import (
"fmt"
"log/slog"
"net/http"
"time"
)
// Checker checks if an Icecast stream is live.
type Checker struct {
streamURL string
client *http.Client
}
// New creates a new Checker instance.
func New(streamURL string) *Checker {
return &Checker{
streamURL: streamURL,
client: &http.Client{
Timeout: 10 * time.Second, // Sensible timeout for checking a stream
},
}
}
// IsLive checks if the stream URL is currently broadcasting.
// It performs a GET request and checks for a 200 OK status.
// Note: Some Icecast servers might behave differently; this might need adjustment.
func (c *Checker) IsLive() (bool, error) {
slog.Debug("Checking stream status", "url", c.streamURL)
req, err := http.NewRequest("GET", c.streamURL, nil)
if err != nil {
return false, fmt.Errorf("failed to create request for stream check: %w", err)
}
// Set a user agent; some servers might require it.
req.Header.Set("User-Agent", "icecast-ripper/1.0")
// Icy-Metadata header can sometimes force the server to respond even if the stream is idle,
// but we want to know if it's *actually* streaming audio data.
// req.Header.Set("Icy-Metadata", "1")
resp, err := c.client.Do(req)
if err != nil {
// Network errors (DNS lookup failure, connection refused) usually mean not live.
slog.Warn("Failed to connect to stream URL during check", "url", c.streamURL, "error", err)
return false, nil // Treat connection errors as 'not live'
}
defer resp.Body.Close()
// A 200 OK status generally indicates the stream is live and broadcasting.
if resp.StatusCode == http.StatusOK {
slog.Info("Stream is live", "url", c.streamURL, "status", resp.StatusCode)
return true, nil
}
slog.Info("Stream is not live", "url", c.streamURL, "status", resp.StatusCode)
return false, nil
}
// GetStreamURL returns the URL being checked.
func (c *Checker) GetStreamURL() string {
return c.streamURL
}
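
As the comments above note, some servers answer 200 even when a mount is idle. If that turns out to matter in practice, one adjustment is to require at least one byte of body data before declaring the stream live; a standalone sketch under that assumption (the URL is hypothetical):

package main

import (
	"fmt"
	"io"
	"net/http"
	"time"
)

func main() {
	client := &http.Client{Timeout: 10 * time.Second}
	resp, err := client.Get("http://example.com/stream") // hypothetical stream URL
	if err != nil {
		fmt.Println("not live:", err)
		return
	}
	defer resp.Body.Close()

	// Require both a 200 status and at least one byte of audio data.
	buf := make([]byte, 1)
	n, readErr := resp.Body.Read(buf)
	live := resp.StatusCode == http.StatusOK && n > 0 && (readErr == nil || readErr == io.EOF)
	fmt.Println("live:", live)
}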

requirements.txt

@@ -1,3 +0,0 @@
aiohttp==3.9.4
yattag==1.15.2
python-dotenv==1.0.0

src/cache.py

@@ -1,47 +0,0 @@
"""cache"""
import sqlite3
from config import load_configuration
def db_init(db_name='recorded_file_hashes.db'):
"""
Connect to the database and return the connection object.
"""
config = load_configuration()
conn = sqlite3.connect(config.output_directory + '/' + db_name)
conn.execute("CREATE TABLE IF NOT EXISTS file_hashes (file_path TEXT PRIMARY KEY, file_hash TEXT)")
return conn
def get_cached_hash(file_path):
"""
Cache the file hash in the database.
"""
conn = db_init()
# check if file_path exists then return it hash else insert
cursor = conn.execute("SELECT file_hash FROM file_hashes WHERE file_path = ? LIMIT 1", (file_path,))
row = cursor.fetchone()
if row:
conn.close()
return row[0]
return False
def cache_hash(file_path, file_hash):
"""
Cache the file hash in the database.
"""
try:
conn = db_init()
conn.execute("INSERT INTO file_hashes (file_path, file_hash) VALUES (?, ?)", (file_path, file_hash))
conn.commit()
conn.close()
return True
except sqlite3.IntegrityError as e:
conn.close()
print(e)
return False

src/config.py

@@ -1,55 +0,0 @@
"""Module for loading configuration values from command line arguments, environment variables and defaults"""
import argparse
import os
from dotenv import load_dotenv
# Load .env file if available
load_dotenv()
# Default configuration values
DEFAULTS = {
'server_host': 'https://example.org',
'server_port': 8080,
'stream_url': 'http://example.com/stream',
'output_directory': './records',
'check_interval': 60,
'timeout_connect': 10,
'timeout_read': 30,
'log_level': 'info'
}
def parse_arguments():
"""Parse command line arguments"""
parser = argparse.ArgumentParser(description='Icecast Ripper Service')
parser.add_argument('--server-host', help='Server host name with protocol')
parser.add_argument('--server-port', type=int, help='Server port number')
parser.add_argument('--stream-url', help='URL of the Icecast stream to monitor and record')
parser.add_argument('--file-url-base', help='Base URL used for constructing file links in the RSS feed')
parser.add_argument('--output-directory', help='Directory to save the recordings')
parser.add_argument('--check-interval', type=int, help='Interval to check the stream in seconds')
parser.add_argument('--timeout-connect', type=int, help='Timeout for connecting to the stream in seconds')
parser.add_argument('--timeout-read', type=int, help='Read timeout in seconds')
parser.add_argument('--log-level', help='Log level')
return vars(parser.parse_args())
def load_configuration():
"""Get values from command line arguments, environment variables and defaults"""
cmd_args = parse_arguments()
# Configuration is established using a priority: CommandLine > EnvironmentVars > Defaults
config = {
'server_host': cmd_args['server_host'] or os.getenv('SERVER_HOST') or DEFAULTS['server_host'],
'server_port': cmd_args['server_port'] or os.getenv('SERVER_PORT') or DEFAULTS['server_port'],
'stream_url': cmd_args['stream_url'] or os.getenv('STREAM_URL') or DEFAULTS['stream_url'],
'output_directory': cmd_args['output_directory'] or os.getenv('OUTPUT_DIRECTORY') or DEFAULTS['output_directory'],
'check_interval': cmd_args['check_interval'] or os.getenv('CHECK_INTERVAL') or DEFAULTS['check_interval'],
'timeout_connect': cmd_args['timeout_connect'] or os.getenv('TIMEOUT_CONNECT') or DEFAULTS['timeout_connect'],
'timeout_read': cmd_args['timeout_read'] or os.getenv('TIMEOUT_READ') or DEFAULTS['timeout_read'],
'log_level': cmd_args['log_level'] or os.getenv('LOG_LEVEL') or DEFAULTS['log_level']
}
# Converting string paths to absolute paths
config['output_directory'] = os.path.abspath(config['output_directory'])
return argparse.Namespace(**config)

src/logger.py

@@ -1,36 +0,0 @@
"""This module contains the logger functions for the application"""
import json
import sys
from datetime import datetime
from config import load_configuration
# Log levels
LOG_LEVELS = {
"DEBUG": 10,
"INFO": 20,
"WARNING": 30,
"ERROR": 40,
"FATAL": 50
}
def log_event(event, details, level="INFO"):
"""Log an event to stdout in JSON format"""
config = load_configuration()
config_level_name = config.log_level.upper()
if config_level_name not in LOG_LEVELS:
raise ValueError(f"Invalid log level {config_level_name} in configuration")
config_level_number = LOG_LEVELS[config_level_name]
event_log_level = LOG_LEVELS.get(level.upper(), 20) # Defaults to INFO if level is invalid
if event_log_level >= config_level_number:
log_entry = {
"timestamp": datetime.utcnow().isoformat(),
"event": event,
"level": level.upper(),
"details": details
}
json_log_entry = json.dumps(log_entry)
print(json_log_entry, file=sys.stdout)
sys.stdout.flush() # Immediately flush the log entry

src/main.py

@@ -1,42 +0,0 @@
"""Main entry point for the Icecast stream checker and ripper"""
import asyncio
from server import start_server
from stream_checker import StreamChecker
from config import load_configuration
from logger import log_event
def main():
"""Main entry point for the Icecast stream checker and ripper"""
# Load configuration from command line arguments and environment variables
config = load_configuration()
log_event("service_start", {"config": config.__dict__}, level="DEBUG")
# Create the StreamChecker instance
checker = StreamChecker(
stream_url=config.stream_url,
check_interval=config.check_interval,
timeout_connect=config.timeout_connect,
timeout_read=config.timeout_read,
output_directory=config.output_directory
)
# Start the Icecast stream checking and recording loop
checker_task = asyncio.ensure_future(checker.run())
# Start the health check and file serving server
server_task = asyncio.ensure_future(start_server(config))
# Run both tasks in the event loop
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(asyncio.gather(checker_task, server_task))
except KeyboardInterrupt:
pass
finally:
checker_task.cancel()
server_task.cancel()
loop.close()
if __name__ == "__main__":
main()

src/ripper.py

@@ -1,78 +0,0 @@
"""Ripper class for recording a stream to a file"""
import os
from datetime import datetime, timedelta
import aiohttp
from logger import log_event
from utils import sanitize_filename
class Ripper: # pylint: disable=too-many-instance-attributes
"""Ripper class for recording a stream to a file"""
def __init__(self, stream_url, output_directory, timeout_connect=10, timeout_read=30):
self.stream_url = stream_url
self.output_directory = output_directory
self.timeout_read = timeout_read
self.timeout_connect = timeout_connect
self.file_name = None
self.file_path = None
self.start_time = None
self.last_data_time = None
self.is_recording = False
async def start_recording(self):
"""Start recording the stream to a file"""
if not os.path.exists(self.output_directory):
try:
os.makedirs(self.output_directory)
except Exception as e: # pylint: disable=broad-except
log_event("output_directory_error", {"error": str(e)}, level="ERROR")
self.start_time = datetime.utcnow()
domain = self.stream_url.split("//")[-1].split("/")[0]
sanitized_domain = sanitize_filename(domain)
date_str = self.start_time.strftime("%Y%m%d_%H%M%S")
self.file_name = f"{sanitized_domain}_{date_str}.mp3.tmp"
self.file_path = os.path.join(self.output_directory, self.file_name)
try:
timeout = aiohttp.ClientTimeout(total=None, connect=self.timeout_connect, sock_read=self.timeout_read)
async with aiohttp.ClientSession(timeout=timeout) as session:
async with session.get(self.stream_url) as response:
if response.status == 200:
self.is_recording = True
log_event("recording_started", {"file_name": self.file_name, "stream_url": self.stream_url})
async for data, _ in response.content.iter_chunks():
if not data:
break
self.last_data_time = datetime.utcnow()
with open(self.file_path, 'ab') as f:
f.write(data)
# Check if timeout exceeded between data chunks
if datetime.utcnow() - self.last_data_time > timedelta(seconds=self.timeout_read):
log_event("timeout_exceeded", {
"stream_url": self.stream_url,
"elapsed_seconds": (datetime.utcnow() - self.last_data_time).total_seconds()
}, level="WARNING")
break
log_event("recording_finished", {"file_name": self.file_name, "stream_url": self.stream_url})
else:
log_event("stream_unavailable", {"http_status": response.status})
except Exception as e: # pylint: disable=broad-except
log_event('recording_error', {"error": str(e)}, level="ERROR")
finally:
self.is_recording = False
self.end_recording()
def end_recording(self):
"""Rename the temporary file to a finished file"""
if os.path.exists(self.file_path):
finished_file = self.file_path.replace('.tmp', '')
os.rename(self.file_path, finished_file)
log_event("recording_saved", {
"file_name": finished_file,
"duration": (datetime.utcnow() - self.start_time).total_seconds() if self.start_time else 0
})
def is_active(self):
"""Check if the ripper is currently recording a stream"""
return self.is_recording

src/rss_generator.py

@@ -1,44 +0,0 @@
"""Generates an RSS feed from the files in the output directory"""
import os
from datetime import datetime
from yattag import Doc
from utils import generate_file_hash, file_hash_to_id
def generate_rss_feed(files, output_directory, server_host):
"""Generates an RSS feed from the files in the output directory"""
doc, tag, text = Doc().tagtext()
doc.asis('<?xml version="1.0" encoding="UTF-8"?>')
with tag('rss'):
doc.attr(
('version', '2.0'),
('xmlns:itunes', 'http://www.itunes.com/dtds/podcast-1.0.dtd'),
('xmlns:content', 'http://purl.org/rss/1.0/modules/content/'),
)
with tag('channel'):
with tag('title'):
text('Icecast Stream Recordings')
with tag('description'):
text('The latest recordings from the Icecast server.')
with tag('link'):
text(server_host)
with tag('itunes:block'):
text('yes')
for file_name in files:
file_path = os.path.join(output_directory, file_name)
file_hash = generate_file_hash(file_path)
file_id = file_hash_to_id(file_hash)
with tag('item'):
with tag('enclosure', url=f'{server_host}/files/{file_name}', length=os.path.getsize(file_path), type='audio/mpeg'):
pass
with tag('title'):
text(file_name)
with tag('guid', isPermaLink='false'):
text(file_id)
with tag('pubDate'):
pub_date = datetime.fromtimestamp(os.path.getctime(file_path)).strftime('%a, %d %b %Y %H:%M:%S UTC')
text(pub_date)
return doc.getvalue()

src/server.py

@@ -1,70 +0,0 @@
"""Server module for the application"""
import os
import mimetypes
from pathlib import Path
from aiohttp import web
import logger
from rss_generator import generate_rss_feed
routes = web.RouteTableDef()
def log_request(request, level="INFO"):
"""Log an HTTP request"""
logger.log_event("http_request", {
"method": request.method,
"path": request.path,
"remote": request.remote,
"query_string": request.query_string,
"headers": dict(request.headers)
}, level)
@routes.get('/health')
async def helth_check(request):
"""Health check endpoint"""
log_request(request)
return web.Response(text="OK")
@routes.get('/rss')
async def rss_feed(request):
"""RSS feed endpoint"""
log_request(request)
output_directory = request.app['config'].output_directory
files = [f for f in os.listdir(output_directory) if f.endswith('.mp3')]
rss_xml = generate_rss_feed(files, output_directory, request.app['config'].server_host)
return web.Response(text=rss_xml, content_type='application/rss+xml')
@routes.get('/files/{file_name}')
async def serve_file(request):
"""File serving endpoint"""
file_name = request.match_info['file_name']
log_request(request)
output_directory = request.app['config'].output_directory
file_path = os.path.join(output_directory, file_name)
if not Path(output_directory).joinpath(file_name).resolve().relative_to(Path(output_directory).resolve()):
logger.log_event("file_access_denied", {"file_name": file_name}, level="WARNING")
return web.Response(status=403, text='Access denied')
if not os.path.exists(file_path):
logger.log_event("file_not_found", {"file_name": file_name}, level="WARNING")
return web.Response(status=404, text='File not found')
file = os.path.basename(file_path)
content_type, _ = mimetypes.guess_type(file)
headers = {
'Content-Type': content_type or 'application/octet-stream',
}
return web.FileResponse(file_path, headers=headers)
async def start_server(config):
"""Start the web server"""
app = web.Application()
app['config'] = config
app.add_routes(routes)
runner = web.AppRunner(app)
await runner.setup()
site = web.TCPSite(runner, '0.0.0.0', config.server_port)
logger.log_event('server_starting', {'port': config.server_port}, level="DEBUG")
await site.start()

src/stream_checker.py

@@ -1,45 +0,0 @@
"""Checking the stream status and starting the ripper"""
import asyncio
from aiohttp import ClientSession, ClientTimeout
from ripper import Ripper
from logger import log_event
class StreamChecker:
"""Checking the stream status and starting the ripper"""
def __init__(self, stream_url, check_interval, timeout_connect, output_directory, timeout_read=30): # pylint: disable=too-many-arguments
self.stream_url = stream_url
self.check_interval = check_interval
self.timeout_connect = timeout_connect
self.timeout_read = timeout_read
self.output_directory = output_directory
self.ripper = None
self.is_stream_live = False
async def check_stream(self, session):
"""Check if the stream is live and start the ripper if needed"""
try:
timeout = ClientTimeout(connect=self.timeout_connect)
async with session.get(self.stream_url, timeout=timeout, allow_redirects=True) as response:
if response.status == 200:
self.is_stream_live = True
log_event("stream_live", {"stream_url": self.stream_url})
else:
self.is_stream_live = False
log_event("stream_offline", {"stream_url": self.stream_url})
except asyncio.TimeoutError:
log_event("check_stream_timeout", {"stream_url": self.stream_url})
except Exception as e: # pylint: disable=broad-except
print(self.stream_url)
log_event("check_stream_error", {"error": str(e)})
async def run(self):
"""Start the stream checking and recording loop"""
while True:
async with ClientSession() as session:
await self.check_stream(session)
if self.is_stream_live and (self.ripper is None or not self.ripper.is_active()):
self.ripper = Ripper(self.stream_url, self.output_directory, self.timeout_read)
await self.ripper.start_recording()
await asyncio.sleep(int(self.check_interval))

src/utils.py

@@ -1,39 +0,0 @@
"""Utility functions for the application"""
import hashlib
import string
from cache import get_cached_hash, cache_hash
def sanitize_filename(filename):
"""
Sanitize the filename by removing or replacing invalid characters.
"""
valid_chars = f"-_.() {string.ascii_letters}{string.digits}"
cleaned_filename = "".join(c for c in filename if c in valid_chars)
cleaned_filename = cleaned_filename.replace(' ', '_') # Replace spaces with underscores
return cleaned_filename
def generate_file_hash(file_path):
"""
Generate a hash for file contents to uniquely identify files.
"""
file_hash = get_cached_hash(file_path)
if file_hash:
return file_hash
hasher = hashlib.sha256()
with open(file_path, 'rb') as f:
while chunk := f.read(8192):
hasher.update(chunk)
file_hash = hasher.hexdigest()
cache_hash(file_path, file_hash)
return file_hash
def file_hash_to_id(file_hash, length=32):
"""
Convert file hash to a shorter file ID, considering only the first length characters.
"""
return file_hash[:length]

14
vendor/github.com/fsnotify/fsnotify/.cirrus.yml generated vendored Normal file
View File

@@ -0,0 +1,14 @@
freebsd_task:
name: 'FreeBSD'
freebsd_instance:
image_family: freebsd-14-2
install_script:
- pkg update -f
- pkg install -y go
test_script:
# run tests as user "cirrus" instead of root
- pw useradd cirrus -m
- chown -R cirrus:cirrus .
- FSNOTIFY_BUFFER=4096 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./...
- sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./...
- FSNOTIFY_DEBUG=1 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race -v ./...

10
vendor/github.com/fsnotify/fsnotify/.gitignore generated vendored Normal file
View File

@@ -0,0 +1,10 @@
# go test -c output
*.test
*.test.exe
# Output of go build ./cmd/fsnotify
/fsnotify
/fsnotify.exe
/test/kqueue
/test/a.out

2
vendor/github.com/fsnotify/fsnotify/.mailmap generated vendored Normal file
View File

@@ -0,0 +1,2 @@
Chris Howey <howeyc@gmail.com> <chris@howey.me>
Nathan Youngman <git@nathany.com> <4566+nathany@users.noreply.github.com>

602
vendor/github.com/fsnotify/fsnotify/CHANGELOG.md generated vendored Normal file
View File

@@ -0,0 +1,602 @@
# Changelog
1.9.0 2025-04-04
----------------
### Changes and fixes
- all: make BufferedWatcher buffered again ([#657])
- inotify: fix race when adding/removing watches while a watched path is being
deleted ([#678], [#686])
- inotify: don't send empty event if a watched path is unmounted ([#655])
- inotify: don't register duplicate watches when watching both a symlink and its
target; previously that would get "half-added" and removing the second would
panic ([#679])
- kqueue: fix watching relative symlinks ([#681])
- kqueue: correctly mark pre-existing entries when watching a link to a dir on
kqueue ([#682])
- illumos: don't send error if changed file is deleted while processing the
event ([#678])
[#657]: https://github.com/fsnotify/fsnotify/pull/657
[#678]: https://github.com/fsnotify/fsnotify/pull/678
[#686]: https://github.com/fsnotify/fsnotify/pull/686
[#655]: https://github.com/fsnotify/fsnotify/pull/655
[#681]: https://github.com/fsnotify/fsnotify/pull/681
[#679]: https://github.com/fsnotify/fsnotify/pull/679
[#682]: https://github.com/fsnotify/fsnotify/pull/682
1.8.0 2024-10-31
----------------
### Additions
- all: add `FSNOTIFY_DEBUG` to print debug logs to stderr ([#619])
### Changes and fixes
- windows: fix behaviour of `WatchList()` to be consistent with other platforms ([#610])
- kqueue: ignore events with Ident=0 ([#590])
- kqueue: set O_CLOEXEC to prevent passing file descriptors to children ([#617])
- kqueue: emit events as "/path/dir/file" instead of "path/link/file" when watching a symlink ([#625])
- inotify: don't send event for IN_DELETE_SELF when also watching the parent ([#620])
- inotify: fix panic when calling Remove() in a goroutine ([#650])
- fen: allow watching subdirectories of watched directories ([#621])
[#590]: https://github.com/fsnotify/fsnotify/pull/590
[#610]: https://github.com/fsnotify/fsnotify/pull/610
[#617]: https://github.com/fsnotify/fsnotify/pull/617
[#619]: https://github.com/fsnotify/fsnotify/pull/619
[#620]: https://github.com/fsnotify/fsnotify/pull/620
[#621]: https://github.com/fsnotify/fsnotify/pull/621
[#625]: https://github.com/fsnotify/fsnotify/pull/625
[#650]: https://github.com/fsnotify/fsnotify/pull/650
1.7.0 - 2023-10-22
------------------
This version of fsnotify needs Go 1.17.
### Additions
- illumos: add FEN backend to support illumos and Solaris. ([#371])
- all: add `NewBufferedWatcher()` to use a buffered channel, which can be useful
in cases where you can't control the kernel buffer and receive a large number
of events in bursts. ([#550], [#572])
- all: add `AddWith()`, which is identical to `Add()` but allows passing
options. ([#521])
- windows: allow setting the ReadDirectoryChangesW() buffer size with
`fsnotify.WithBufferSize()`; the default of 64K is the highest value that
works on all platforms and is enough for most purposes, but in some cases a
higher buffer is needed. ([#521])
### Changes and fixes
- inotify: remove watcher if a watched path is renamed ([#518])
After a rename the reported name wasn't updated, or was even an empty string.
Inotify doesn't provide any good facilities to update it, so just remove the
watcher. This is already how it worked on kqueue and FEN.
On Windows this does work, and remains working.
- windows: don't listen for file attribute changes ([#520])
File attribute changes are sent as `FILE_ACTION_MODIFIED` by the Windows API,
with no way to see if they're a file write or attribute change, so would show
up as a fsnotify.Write event. This is never useful, and could result in many
spurious Write events.
- windows: return `ErrEventOverflow` if the buffer is full ([#525])
Before it would merely return "short read", making it hard to detect this
error.
- kqueue: make sure events for all files are delivered properly when removing a
watched directory ([#526])
Previously they would get sent with `""` (empty string) or `"."` as the path
name.
- kqueue: don't emit spurious Create events for symbolic links ([#524])
The link would get resolved but kqueue would "forget" it already saw the link
itself, resulting in a Create for every Write event for the directory.
- all: return `ErrClosed` on `Add()` when the watcher is closed ([#516])
- other: add `Watcher.Errors` and `Watcher.Events` to the no-op `Watcher` in
`backend_other.go`, making it easier to use on unsupported platforms such as
WASM, AIX, etc. ([#528])
- other: use the `backend_other.go` no-op if the `appengine` build tag is set;
Google AppEngine forbids usage of the unsafe package so the inotify backend
won't compile there.
[#371]: https://github.com/fsnotify/fsnotify/pull/371
[#516]: https://github.com/fsnotify/fsnotify/pull/516
[#518]: https://github.com/fsnotify/fsnotify/pull/518
[#520]: https://github.com/fsnotify/fsnotify/pull/520
[#521]: https://github.com/fsnotify/fsnotify/pull/521
[#524]: https://github.com/fsnotify/fsnotify/pull/524
[#525]: https://github.com/fsnotify/fsnotify/pull/525
[#526]: https://github.com/fsnotify/fsnotify/pull/526
[#528]: https://github.com/fsnotify/fsnotify/pull/528
[#537]: https://github.com/fsnotify/fsnotify/pull/537
[#550]: https://github.com/fsnotify/fsnotify/pull/550
[#572]: https://github.com/fsnotify/fsnotify/pull/572
1.6.0 - 2022-10-13
------------------
This version of fsnotify needs Go 1.16 (this was already the case since 1.5.1,
but not documented). It also increases the minimum Linux version to 2.6.32.
### Additions
- all: add `Event.Has()` and `Op.Has()` ([#477])
This makes checking events a lot easier; for example:
if event.Op&Write == Write && !(event.Op&Remove == Remove) {
}
Becomes:
if event.Has(Write) && !event.Has(Remove) {
}
- all: add cmd/fsnotify ([#463])
A command-line utility for testing and some examples.
### Changes and fixes
- inotify: don't ignore events for files that don't exist ([#260], [#470])
Previously the inotify watcher would call `os.Lstat()` to check if a file
still exists before emitting events.
This was inconsistent with other platforms and resulted in inconsistent event
reporting (e.g. when a file is quickly removed and re-created), and generally
a source of confusion. It was added in 2013 to fix a memory leak that no
longer exists.
- all: return `ErrNonExistentWatch` when `Remove()` is called on a path that's
not watched ([#460])
- inotify: replace epoll() with non-blocking inotify ([#434])
Non-blocking inotify was not generally available at the time this library was
written in 2014, but now it is. As a result, the minimum Linux version is
bumped from 2.6.27 to 2.6.32. This hugely simplifies the code and is faster.
- kqueue: don't check for events every 100ms ([#480])
The watcher would wake up every 100ms, even when there was nothing to do. Now
it waits until there is something to do.
- macos: retry opening files on EINTR ([#475])
- kqueue: skip unreadable files ([#479])
kqueue requires a file descriptor for every file in a directory; this would
fail if a file was unreadable by the current user. Now these files are simply
skipped.
- windows: fix renaming a watched directory if the parent is also watched ([#370])
- windows: increase buffer size from 4K to 64K ([#485])
- windows: close file handle on Remove() ([#288])
- kqueue: put pathname in the error if watching a file fails ([#471])
- inotify, windows: calling Close() more than once could race ([#465])
- kqueue: improve Close() performance ([#233])
- all: various documentation additions and clarifications.
[#233]: https://github.com/fsnotify/fsnotify/pull/233
[#260]: https://github.com/fsnotify/fsnotify/pull/260
[#288]: https://github.com/fsnotify/fsnotify/pull/288
[#370]: https://github.com/fsnotify/fsnotify/pull/370
[#434]: https://github.com/fsnotify/fsnotify/pull/434
[#460]: https://github.com/fsnotify/fsnotify/pull/460
[#463]: https://github.com/fsnotify/fsnotify/pull/463
[#465]: https://github.com/fsnotify/fsnotify/pull/465
[#470]: https://github.com/fsnotify/fsnotify/pull/470
[#471]: https://github.com/fsnotify/fsnotify/pull/471
[#475]: https://github.com/fsnotify/fsnotify/pull/475
[#477]: https://github.com/fsnotify/fsnotify/pull/477
[#479]: https://github.com/fsnotify/fsnotify/pull/479
[#480]: https://github.com/fsnotify/fsnotify/pull/480
[#485]: https://github.com/fsnotify/fsnotify/pull/485
## [1.5.4] - 2022-04-25
* Windows: add missing defer to `Watcher.WatchList` [#447](https://github.com/fsnotify/fsnotify/pull/447)
* go.mod: use latest x/sys [#444](https://github.com/fsnotify/fsnotify/pull/444)
* Fix compilation for OpenBSD [#443](https://github.com/fsnotify/fsnotify/pull/443)
## [1.5.3] - 2022-04-22
* This version is retracted. An incorrect branch is published accidentally [#445](https://github.com/fsnotify/fsnotify/issues/445)
## [1.5.2] - 2022-04-21
* Add a feature to return the directories and files that are being monitored [#374](https://github.com/fsnotify/fsnotify/pull/374)
* Fix potential crash on windows if `raw.FileNameLength` exceeds `syscall.MAX_PATH` [#361](https://github.com/fsnotify/fsnotify/pull/361)
* Allow build on unsupported GOOS [#424](https://github.com/fsnotify/fsnotify/pull/424)
* Don't set `poller.fd` twice in `newFdPoller` [#406](https://github.com/fsnotify/fsnotify/pull/406)
* fix go vet warnings: call to `(*T).Fatalf` from a non-test goroutine [#416](https://github.com/fsnotify/fsnotify/pull/416)
## [1.5.1] - 2021-08-24
* Revert Add AddRaw to not follow symlinks [#394](https://github.com/fsnotify/fsnotify/pull/394)
## [1.5.0] - 2021-08-20
* Go: Increase minimum required version to Go 1.12 [#381](https://github.com/fsnotify/fsnotify/pull/381)
* Feature: Add AddRaw method which does not follow symlinks when adding a watch [#298](https://github.com/fsnotify/fsnotify/pull/298)
* Windows: Follow symlinks by default like on all other systems [#289](https://github.com/fsnotify/fsnotify/pull/289)
* CI: Use GitHub Actions for CI and cover go 1.12-1.17
[#378](https://github.com/fsnotify/fsnotify/pull/378)
[#381](https://github.com/fsnotify/fsnotify/pull/381)
[#385](https://github.com/fsnotify/fsnotify/pull/385)
* Go 1.14+: Fix unsafe pointer conversion [#325](https://github.com/fsnotify/fsnotify/pull/325)
## [1.4.9] - 2020-03-11
* Move example usage to the readme #329. This may resolve #328.
## [1.4.8] - 2020-03-10
* CI: test more go versions (@nathany 1d13583d846ea9d66dcabbfefbfb9d8e6fb05216)
* Tests: Queued inotify events could have been read by the test before max_queued_events was hit (@matthias-stone #265)
* Tests: t.Fatalf -> t.Errorf in go routines (@gdey #266)
* CI: Less verbosity (@nathany #267)
* Tests: Darwin: Exchangedata is deprecated on 10.13 (@nathany #267)
* Tests: Check if channels are closed in the example (@alexeykazakov #244)
* CI: Only run golint on latest version of go and fix issues (@cpuguy83 #284)
* CI: Add windows to travis matrix (@cpuguy83 #284)
* Docs: Remove appveyor badge (@nathany 11844c0959f6fff69ba325d097fce35bd85a8e93)
* Linux: create epoll and pipe fds with close-on-exec (@JohannesEbke #219)
* Linux: open files with close-on-exec (@linxiulei #273)
* Docs: Plan to support fanotify (@nathany ab058b44498e8b7566a799372a39d150d9ea0119 )
* Project: Add go.mod (@nathany #309)
* Project: Revise editor config (@nathany #309)
* Project: Update copyright for 2019 (@nathany #309)
* CI: Drop go1.8 from CI matrix (@nathany #309)
* Docs: Updating the FAQ section for supportability with NFS & FUSE filesystems (@Pratik32 4bf2d1fec78374803a39307bfb8d340688f4f28e )
## [1.4.7] - 2018-01-09
* BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine)
* Tests: Fix missing verb on format string (thanks @rchiossi)
* Linux: Fix deadlock in Remove (thanks @aarondl)
* Linux: Watch.Add improvements (avoid race, fix consistency, reduce garbage) (thanks @twpayne)
* Docs: Moved FAQ into the README (thanks @vahe)
* Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich)
* Docs: replace references to OS X with macOS
## [1.4.2] - 2016-10-10
* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack)
## [1.4.1] - 2016-10-04
* Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack)
## [1.4.0] - 2016-10-01
* add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie)
## [1.3.1] - 2016-06-28
* Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc)
## [1.3.0] - 2016-04-19
* Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135)
## [1.2.10] - 2016-03-02
* Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj)
## [1.2.9] - 2016-01-13
kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep)
## [1.2.8] - 2015-12-17
* kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test)
* inotify: fix race in test
* enable race detection for continuous integration (Linux, Mac, Windows)
## [1.2.5] - 2015-10-17
* inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki)
* inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken)
* kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie)
* kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion)
## [1.2.1] - 2015-10-14
* kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx)
## [1.2.0] - 2015-02-08
* inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD)
* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD)
* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59)
## [1.1.1] - 2015-02-05
* inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD)
## [1.1.0] - 2014-12-12
* kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43)
* add low-level functions
* only need to store flags on directories
* less mutexes [#13](https://github.com/fsnotify/fsnotify/issues/13)
* done can be an unbuffered channel
* remove calls to os.NewSyscallError
* More efficient string concatenation for Event.String() [#52](https://github.com/fsnotify/fsnotify/pull/52) (thanks @mdlayher)
* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48)
* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
## [1.0.4] - 2014-09-07
* kqueue: add dragonfly to the build tags.
* Rename source code files, rearrange code so exported APIs are at the top.
* Add done channel to example code. [#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang)
## [1.0.3] - 2014-08-19
* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36)
## [1.0.2] - 2014-08-17
* [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
* [Fix] Make ./path and path equivalent. (thanks @zhsso)
## [1.0.0] - 2014-08-15
* [API] Remove AddWatch on Windows, use Add.
* Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30)
* Minor updates based on feedback from golint.
## dev / 2014-07-09
* Moved to [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify).
* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno)
## dev / 2014-07-04
* kqueue: fix incorrect mutex used in Close()
* Update example to demonstrate usage of Op.
## dev / 2014-06-28
* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/fsnotify/fsnotify/issues/4)
* Fix for String() method on Event (thanks Alex Brainman)
* Don't build on Plan 9 or Solaris (thanks @4ad)
## dev / 2014-06-21
* Events channel of type Event rather than *Event.
* [internal] use syscall constants directly for inotify and kqueue.
* [internal] kqueue: rename events to kevents and fileEvent to event.
## dev / 2014-06-19
* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally).
* [internal] remove cookie from Event struct (unused).
* [internal] Event struct has the same definition across every OS.
* [internal] remove internal watch and removeWatch methods.
## dev / 2014-06-12
* [API] Renamed Watch() to Add() and RemoveWatch() to Remove().
* [API] Pluralized channel names: Events and Errors.
* [API] Renamed FileEvent struct to Event.
* [API] Op constants replace methods like IsCreate().
## dev / 2014-06-12
* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
## dev / 2014-05-23
* [API] Remove current implementation of WatchFlags.
* current implementation doesn't take advantage of OS for efficiency
* provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes
* no tests for the current implementation
* not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195)
## [0.9.3] - 2014-12-31
* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
## [0.9.2] - 2014-08-17
* [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
## [0.9.1] - 2014-06-12
* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
## [0.9.0] - 2014-01-17
* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany)
* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare)
* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library.
## [0.8.12] - 2013-11-13
* [API] Remove FD_SET and friends from Linux adapter
## [0.8.11] - 2013-11-02
* [Doc] Add Changelog [#72][] (thanks @nathany)
* [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond)
## [0.8.10] - 2013-10-19
* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott)
* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer)
* [Doc] specify OS-specific limits in README (thanks @debrando)
## [0.8.9] - 2013-09-08
* [Doc] Contributing (thanks @nathany)
* [Doc] update package path in example code [#63][] (thanks @paulhammond)
* [Doc] GoCI badge in README (Linux only) [#60][]
* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany)
## [0.8.8] - 2013-06-17
* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie)
## [0.8.7] - 2013-06-03
* [API] Make syscall flags internal
* [Fix] inotify: ignore event changes
* [Fix] race in symlink test [#45][] (reported by @srid)
* [Fix] tests on Windows
* lower case error messages
## [0.8.6] - 2013-05-23
* kqueue: Use EVT_ONLY flag on Darwin
* [Doc] Update README with full example
## [0.8.5] - 2013-05-09
* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg)
## [0.8.4] - 2013-04-07
* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz)
## [0.8.3] - 2013-03-13
* [Fix] inotify/kqueue memory leak [#36][] (reported by @nbkolchin)
* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin)
## [0.8.2] - 2013-02-07
* [Doc] add Authors
* [Fix] fix data races for map access [#29][] (thanks @fsouza)
## [0.8.1] - 2013-01-09
* [Fix] Windows path separators
* [Doc] BSD License
## [0.8.0] - 2012-11-09
* kqueue: directory watching improvements (thanks @vmirage)
* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto)
* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr)
## [0.7.4] - 2012-10-09
* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji)
* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig)
* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig)
* [Fix] kqueue: modify after recreation of file
## [0.7.3] - 2012-09-27
* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage)
* [Fix] kqueue: no longer get duplicate CREATE events
## [0.7.2] - 2012-09-01
* kqueue: events for created directories
## [0.7.1] - 2012-07-14
* [Fix] for renaming files
## [0.7.0] - 2012-07-02
* [Feature] FSNotify flags
* [Fix] inotify: Added file name back to event path
## [0.6.0] - 2012-06-06
* kqueue: watch files after directory created (thanks @tmc)
## [0.5.1] - 2012-05-22
* [Fix] inotify: remove all watches before Close()
## [0.5.0] - 2012-05-03
* [API] kqueue: return errors during watch instead of sending over channel
* kqueue: match symlink behavior on Linux
* inotify: add `DELETE_SELF` (requested by @taralx)
* [Fix] kqueue: handle EINTR (reported by @robfig)
* [Doc] Godoc example [#1][] (thanks @davecheney)
## [0.4.0] - 2012-03-30
* Go 1 released: build with go tool
* [Feature] Windows support using winfsnotify
* Windows does not have attribute change notifications
* Roll attribute notifications into IsModify
## [0.3.0] - 2012-02-19
* kqueue: add files when watch directory
## [0.2.0] - 2011-12-30
* update to latest Go weekly code
## [0.1.0] - 2011-10-19
* kqueue: add watch on file creation to match inotify
* kqueue: create file event
* inotify: ignore `IN_IGNORED` events
* event String()
* linux: common FileEvent functions
* initial commit
[#79]: https://github.com/howeyc/fsnotify/pull/79
[#77]: https://github.com/howeyc/fsnotify/pull/77
[#72]: https://github.com/howeyc/fsnotify/issues/72
[#71]: https://github.com/howeyc/fsnotify/issues/71
[#70]: https://github.com/howeyc/fsnotify/issues/70
[#63]: https://github.com/howeyc/fsnotify/issues/63
[#62]: https://github.com/howeyc/fsnotify/issues/62
[#60]: https://github.com/howeyc/fsnotify/issues/60
[#59]: https://github.com/howeyc/fsnotify/issues/59
[#49]: https://github.com/howeyc/fsnotify/issues/49
[#45]: https://github.com/howeyc/fsnotify/issues/45
[#40]: https://github.com/howeyc/fsnotify/issues/40
[#36]: https://github.com/howeyc/fsnotify/issues/36
[#33]: https://github.com/howeyc/fsnotify/issues/33
[#29]: https://github.com/howeyc/fsnotify/issues/29
[#25]: https://github.com/howeyc/fsnotify/issues/25
[#24]: https://github.com/howeyc/fsnotify/issues/24
[#21]: https://github.com/howeyc/fsnotify/issues/21

145
vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md generated vendored Normal file
View File

@@ -0,0 +1,145 @@
Thank you for your interest in contributing to fsnotify! We try to review and
merge PRs in a reasonable timeframe, but please be aware that:
- To avoid "wasted" work, please discuss changes on the issue tracker first. You
can just send PRs, but they may end up being rejected for one reason or the
other.
- fsnotify is a cross-platform library, and changes must work reasonably well on
all supported platforms.
- Changes will need to be compatible; old code should still compile, and the
runtime behaviour can't change in ways that are likely to lead to problems for
users.
Testing
-------
Just `go test ./...` runs all the tests; the CI runs this on all supported
platforms. Testing different platforms locally can be done with something like
[goon] or [Vagrant], but this isn't super-easy to set up at the moment.
Use the `-short` flag to make the "stress test" run faster.
Writing new tests
-----------------
Scripts in the testdata directory allow creating test cases in a "shell-like"
syntax. The basic format is:
script
Output:
desired output
For example:
# Create a new file and write some data to it.
watch /
echo data >/file
Output:
create /file
write /file
Just create a new file to add a new test; select which tests to run with
`-run TestScript/[path]`.
script
------
The script is a "shell-like" script:
cmd arg arg
Comments are supported with `#`:
# Comment
cmd arg arg # Comment
All operations are done in a temp directory; a path like "/foo" is rewritten to
"/tmp/TestFoo/foo".
Arguments can be quoted with `"` or `'`; there are no escapes and they're
functionally identical right now, but this may change in the future, so best to
assume shell-like rules.
touch "/file with spaces"
End-of-line escapes with `\` are not supported.
### Supported commands
watch path [ops] # Watch the path, reporting events for it. Nothing is
# watched by default. Optionally a list of ops can be
# given, as with AddWith(path, WithOps(...)).
unwatch path # Stop watching the path.
watchlist n # Assert watchlist length.
stop # Stop running the script; for debugging.
debug [yes/no] # Enable/disable FSNOTIFY_DEBUG (tests are run in
parallel by default, so -parallel=1 is probably a good
idea).
print [any strings] # Print text to stdout; for debugging.
touch path
mkdir [-p] dir
ln -s target link # Only ln -s supported.
mkfifo path
mknod dev path
mv src dst
rm [-r] path
chmod mode path # Octal only
sleep time-in-ms
cat path # Read path (does nothing with the data; just reads it).
echo str >>path # Append "str" to "path".
echo str >path # Truncate "path" and write "str".
require reason # Skip the test if "reason" is true; "skip" and
skip reason # "require" behave identically; it supports both for
# readability. Possible reasons are:
#
# always Always skip this test.
# symlink Symlinks are supported (requires admin
# permissions on Windows).
# mkfifo Platform doesn't support FIFO named sockets.
# mknod Platform doesn't support device nodes.
output
------
After `Output:` the desired output is given; this is indented by convention, but
that's not required.
The format of that is:
# Comment
event path # Comment
system:
event path
system2:
event path
Every event is one line, and any whitespace between the event and path is
ignored. The path can optionally be surrounded in ". Anything after a "#" is
ignored.
Platform-specific tests can be added after GOOS; for example:
watch /
touch /file
Output:
# Tested if nothing else matches
create /file
# Windows-specific test.
windows:
write /file
You can specify multiple platforms with a comma (e.g. "windows, linux:").
"kqueue" is a shortcut for all kqueue systems (BSD, macOS).
[goon]: https://github.com/arp242/goon
[Vagrant]: https://www.vagrantup.com/
[integration_test.go]: /integration_test.go

25
vendor/github.com/fsnotify/fsnotify/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,25 @@
Copyright © 2012 The Go Authors. All rights reserved.
Copyright © fsnotify Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* Neither the name of Google Inc. nor the names of its contributors may be used
to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

182
vendor/github.com/fsnotify/fsnotify/README.md generated vendored Normal file
View File

@@ -0,0 +1,182 @@
fsnotify is a Go library to provide cross-platform filesystem notifications on
Windows, Linux, macOS, BSD, and illumos.
Go 1.17 or newer is required; the full documentation is at
https://pkg.go.dev/github.com/fsnotify/fsnotify
---
Platform support:
| Backend | OS | Status |
| :-------------------- | :--------- | :------------------------------------------------------------------------ |
| inotify | Linux | Supported |
| kqueue | BSD, macOS | Supported |
| ReadDirectoryChangesW | Windows | Supported |
| FEN | illumos | Supported |
| fanotify | Linux 5.9+ | [Not yet](https://github.com/fsnotify/fsnotify/issues/114) |
| FSEvents | macOS | [Needs support in x/sys/unix][fsevents] |
| USN Journals | Windows | [Needs support in x/sys/windows][usn] |
| Polling | *All* | [Not yet](https://github.com/fsnotify/fsnotify/issues/9) |
Linux and illumos should include Android and Solaris, but these are currently
untested.
[fsevents]: https://github.com/fsnotify/fsnotify/issues/11#issuecomment-1279133120
[usn]: https://github.com/fsnotify/fsnotify/issues/53#issuecomment-1279829847
Usage
-----
A basic example:
```go
package main

import (
    "log"

    "github.com/fsnotify/fsnotify"
)

func main() {
    // Create new watcher.
    watcher, err := fsnotify.NewWatcher()
    if err != nil {
        log.Fatal(err)
    }
    defer watcher.Close()

    // Start listening for events.
    go func() {
        for {
            select {
            case event, ok := <-watcher.Events:
                if !ok {
                    return
                }
                log.Println("event:", event)
                if event.Has(fsnotify.Write) {
                    log.Println("modified file:", event.Name)
                }
            case err, ok := <-watcher.Errors:
                if !ok {
                    return
                }
                log.Println("error:", err)
            }
        }
    }()

    // Add a path.
    err = watcher.Add("/tmp")
    if err != nil {
        log.Fatal(err)
    }

    // Block main goroutine forever.
    <-make(chan struct{})
}
```
Some more examples can be found in [cmd/fsnotify](cmd/fsnotify), which can be
run with:
% go run ./cmd/fsnotify
Further detailed documentation can be found in godoc:
https://pkg.go.dev/github.com/fsnotify/fsnotify
FAQ
---
### Will a file still be watched when it's moved to another directory?
No, not unless you are watching the location it was moved to.
### Are subdirectories watched?
No, you must add watches for any directory you want to watch (a recursive
watcher is on the roadmap: [#18]).
[#18]: https://github.com/fsnotify/fsnotify/issues/18
### Do I have to watch the Error and Event channels in a goroutine?
Yes. You can read both channels in the same goroutine using `select` (you don't
need a separate goroutine for both channels; see the example).
### Why don't notifications work with NFS, SMB, FUSE, /proc, or /sys?
fsnotify requires support from the underlying OS to work. The current NFS and SMB
protocols do not provide network-level support for file notifications, and
neither do the /proc and /sys virtual filesystems.
This could be fixed with a polling watcher ([#9]), but it's not yet implemented.
[#9]: https://github.com/fsnotify/fsnotify/issues/9
### Why do I get many Chmod events?
Some programs may generate a lot of attribute changes; for example Spotlight on
macOS, anti-virus programs, backup applications, and some others are known to do
this. As a rule, it's typically best to ignore Chmod events. They're often not
useful, and tend to cause problems.
Spotlight indexing on macOS can result in multiple events (see [#15]). A
temporary workaround is to add your folder(s) to the *Spotlight Privacy
settings* until we have a native FSEvents implementation (see [#11]).
[#11]: https://github.com/fsnotify/fsnotify/issues/11
[#15]: https://github.com/fsnotify/fsnotify/issues/15
### Watching a file doesn't work well
Watching individual files (rather than directories) is generally not recommended
as many programs (especially editors) update files atomically: they write to
a temporary file which is then moved to the destination, overwriting the original
(or some variant thereof). The watcher on the original file is now lost, as that
no longer exists.
The upshot of this is that a power failure or crash won't leave a half-written
file.
Watch the parent directory and use `Event.Name` to filter out files you're not
interested in. There is an example of this in `cmd/fsnotify/file.go`.
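As a rough sketch of that pattern (not the `cmd/fsnotify/file.go` example itself, and using a hypothetical path `/tmp/app.conf`):

```go
package main

import (
	"log"
	"path/filepath"

	"github.com/fsnotify/fsnotify"
)

func main() {
	file := "/tmp/app.conf" // hypothetical file of interest

	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer watcher.Close()

	// Watch the parent directory, so atomic saves (write to a temporary
	// file, then rename it over the original) keep producing events.
	if err := watcher.Add(filepath.Dir(file)); err != nil {
		log.Fatal(err)
	}

	for {
		select {
		case event, ok := <-watcher.Events:
			if !ok {
				return
			}
			if event.Name != file { // skip other files in the directory
				continue
			}
			if event.Has(fsnotify.Write) || event.Has(fsnotify.Create) {
				log.Println("changed:", event.Name)
			}
		case err, ok := <-watcher.Errors:
			if !ok {
				return
			}
			log.Println("error:", err)
		}
	}
}
```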
Platform-specific notes
-----------------------
### Linux
When a file is removed a REMOVE event won't be emitted until all file
descriptors are closed; it will emit a CHMOD instead:
fp := os.Open("file")
os.Remove("file") // CHMOD
fp.Close() // REMOVE
This is the event that inotify sends, so not much can be changed about this.
The `fs.inotify.max_user_watches` sysctl variable specifies the upper limit for
the number of watches per user, and `fs.inotify.max_user_instances` specifies
the maximum number of inotify instances per user. Every Watcher you create is an
"instance", and every path you add is a "watch".
These are also exposed in `/proc` as `/proc/sys/fs/inotify/max_user_watches` and
`/proc/sys/fs/inotify/max_user_instances`
To increase them you can use `sysctl` or write the value to the proc file:
# The default values on Linux 5.18
sysctl fs.inotify.max_user_watches=124983
sysctl fs.inotify.max_user_instances=128
To make the changes persist on reboot edit `/etc/sysctl.conf` or
`/usr/lib/sysctl.d/50-default.conf` (details differ per Linux distro; check your
distro's documentation):
fs.inotify.max_user_watches=124983
fs.inotify.max_user_instances=128
Reaching the limit will result in a "no space left on device" or "too many open
files" error.
### kqueue (macOS, all BSD systems)
kqueue requires opening a file descriptor for every file that's being watched;
so if you're watching a directory with five files then that's six file
descriptors. You will run into your system's "max open files" limit faster on
these platforms.
The sysctl variables `kern.maxfiles` and `kern.maxfilesperproc` can be used to
control the maximum number of open files.
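For example, `sysctl kern.maxfiles` and `sysctl kern.maxfilesperproc` print the current values, and raising them follows the same `sysctl variable=value` pattern shown for Linux above.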

467
vendor/github.com/fsnotify/fsnotify/backend_fen.go generated vendored Normal file
View File

@@ -0,0 +1,467 @@
//go:build solaris
// FEN backend for illumos (supported) and Solaris (untested, but should work).
//
// See port_create(3c) etc. for docs. https://www.illumos.org/man/3C/port_create
package fsnotify
import (
"errors"
"fmt"
"io/fs"
"os"
"path/filepath"
"sync"
"time"
"github.com/fsnotify/fsnotify/internal"
"golang.org/x/sys/unix"
)
type fen struct {
*shared
Events chan Event
Errors chan error
mu sync.Mutex
port *unix.EventPort
dirs map[string]Op // Explicitly watched directories
watches map[string]Op // Explicitly watched non-directories
}
var defaultBufferSize = 0
func newBackend(ev chan Event, errs chan error) (backend, error) {
w := &fen{
shared: newShared(ev, errs),
Events: ev,
Errors: errs,
dirs: make(map[string]Op),
watches: make(map[string]Op),
}
var err error
w.port, err = unix.NewEventPort()
if err != nil {
return nil, fmt.Errorf("fsnotify.NewWatcher: %w", err)
}
go w.readEvents()
return w, nil
}
func (w *fen) Close() error {
if w.shared.close() {
return nil
}
return w.port.Close()
}
func (w *fen) Add(name string) error { return w.AddWith(name) }
func (w *fen) AddWith(name string, opts ...addOpt) error {
if w.isClosed() {
return ErrClosed
}
if debug {
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n",
time.Now().Format("15:04:05.000000000"), name)
}
with := getOptions(opts...)
if !w.xSupports(with.op) {
return fmt.Errorf("%w: %s", xErrUnsupported, with.op)
}
// Currently we resolve symlinks that were explicitly requested to be
// watched. Otherwise we would use LStat here.
stat, err := os.Stat(name)
if err != nil {
return err
}
// Associate all files in the directory.
if stat.IsDir() {
err := w.handleDirectory(name, stat, true, w.associateFile)
if err != nil {
return err
}
w.mu.Lock()
w.dirs[name] = with.op
w.mu.Unlock()
return nil
}
err = w.associateFile(name, stat, true)
if err != nil {
return err
}
w.mu.Lock()
w.watches[name] = with.op
w.mu.Unlock()
return nil
}
func (w *fen) Remove(name string) error {
if w.isClosed() {
return nil
}
if !w.port.PathIsWatched(name) {
return fmt.Errorf("%w: %s", ErrNonExistentWatch, name)
}
if debug {
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n",
time.Now().Format("15:04:05.000000000"), name)
}
// The user has expressed an intent. Immediately remove this name from
// whichever watch list it might be in. If it's not in there the delete
// doesn't cause harm.
w.mu.Lock()
delete(w.watches, name)
delete(w.dirs, name)
w.mu.Unlock()
stat, err := os.Stat(name)
if err != nil {
return err
}
// Remove associations for every file in the directory.
if stat.IsDir() {
err := w.handleDirectory(name, stat, false, w.dissociateFile)
if err != nil {
return err
}
return nil
}
err = w.port.DissociatePath(name)
if err != nil {
return err
}
return nil
}
// readEvents contains the main loop that runs in a goroutine watching for events.
func (w *fen) readEvents() {
// If this function returns, the watcher has been closed and we can close
// these channels
defer func() {
close(w.Errors)
close(w.Events)
}()
pevents := make([]unix.PortEvent, 8)
for {
count, err := w.port.Get(pevents, 1, nil)
if err != nil && err != unix.ETIME {
// Interrupted system call (count should be 0) ignore and continue
if errors.Is(err, unix.EINTR) && count == 0 {
continue
}
// Get failed because we called w.Close()
if errors.Is(err, unix.EBADF) && w.isClosed() {
return
}
// There was an error not caused by calling w.Close()
if !w.sendError(fmt.Errorf("port.Get: %w", err)) {
return
}
}
p := pevents[:count]
for _, pevent := range p {
if pevent.Source != unix.PORT_SOURCE_FILE {
// Event from unexpected source received; should never happen.
if !w.sendError(errors.New("Event from unexpected source received")) {
return
}
continue
}
if debug {
internal.Debug(pevent.Path, pevent.Events)
}
err = w.handleEvent(&pevent)
if !w.sendError(err) {
return
}
}
}
}
func (w *fen) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error {
files, err := os.ReadDir(path)
if err != nil {
return err
}
// Handle all children of the directory.
for _, entry := range files {
finfo, err := entry.Info()
if err != nil {
return err
}
err = handler(filepath.Join(path, finfo.Name()), finfo, false)
if err != nil {
return err
}
}
// And finally handle the directory itself.
return handler(path, stat, follow)
}
// handleEvent might need to emit more than one fsnotify event if the events
// bitmap matches more than one event type (e.g. the file was both modified and
had the attributes changed between when the association was created and
when the event was returned)
func (w *fen) handleEvent(event *unix.PortEvent) error {
var (
events = event.Events
path = event.Path
fmode = event.Cookie.(os.FileMode)
reRegister = true
)
w.mu.Lock()
_, watchedDir := w.dirs[path]
_, watchedPath := w.watches[path]
w.mu.Unlock()
isWatched := watchedDir || watchedPath
if events&unix.FILE_DELETE != 0 {
if !w.sendEvent(Event{Name: path, Op: Remove}) {
return nil
}
reRegister = false
}
if events&unix.FILE_RENAME_FROM != 0 {
if !w.sendEvent(Event{Name: path, Op: Rename}) {
return nil
}
// Don't keep watching the new file name
reRegister = false
}
if events&unix.FILE_RENAME_TO != 0 {
// We don't report a Rename event for this case, because Rename events
// are interpreted as referring to the _old_ name of the file, and in
// this case the event would refer to the new name of the file. This
// type of rename event is not supported by fsnotify.
// inotify reports a Remove event in this case, so we simulate this
// here.
if !w.sendEvent(Event{Name: path, Op: Remove}) {
return nil
}
// Don't keep watching the file that was removed
reRegister = false
}
// The file is gone, nothing left to do.
if !reRegister {
if watchedDir {
w.mu.Lock()
delete(w.dirs, path)
w.mu.Unlock()
}
if watchedPath {
w.mu.Lock()
delete(w.watches, path)
w.mu.Unlock()
}
return nil
}
// If we didn't get a deletion the file still exists and we're going to have
// to watch it again. Let's Stat it now so that we can compare permissions
// and have what we need to continue watching the file
stat, err := os.Lstat(path)
if err != nil {
// This is unexpected, but we should still emit an event. This happens
// most often on "rm -r" of a subdirectory inside a watched directory. We
// get a modify event of something happening inside, but by the time we
// get here, the subdirectory is already gone. Clearly we were watching
// this path but now it is gone. Let's tell the user that it was
// removed.
if !w.sendEvent(Event{Name: path, Op: Remove}) {
return nil
}
// Suppress extra write events on removed directories; they are not
// informative and can be confusing.
return nil
}
// resolve symlinks that were explicitly watched as we would have at Add()
// time. this helps suppress spurious Chmod events on watched symlinks
if isWatched {
stat, err = os.Stat(path)
if err != nil {
// The symlink still exists, but the target is gone. Report the
// Remove similar to above.
if !w.sendEvent(Event{Name: path, Op: Remove}) {
return nil
}
// Don't return the error
}
}
if events&unix.FILE_MODIFIED != 0 {
if fmode.IsDir() && watchedDir {
if err := w.updateDirectory(path); err != nil {
return err
}
} else {
if !w.sendEvent(Event{Name: path, Op: Write}) {
return nil
}
}
}
if events&unix.FILE_ATTRIB != 0 && stat != nil {
// Only send Chmod if perms changed
if stat.Mode().Perm() != fmode.Perm() {
if !w.sendEvent(Event{Name: path, Op: Chmod}) {
return nil
}
}
}
if stat != nil {
// If we get here, it means we've hit an event above that requires us to
// continue watching the file or directory
err := w.associateFile(path, stat, isWatched)
if errors.Is(err, fs.ErrNotExist) {
// Path may have been removed since the stat.
err = nil
}
return err
}
return nil
}
// The directory was modified, so we must find unwatched entities and watch
// them. If something was removed from the directory, nothing will happen, as
// everything else should still be watched.
func (w *fen) updateDirectory(path string) error {
files, err := os.ReadDir(path)
if err != nil {
// Directory no longer exists: probably just deleted since we got the
// event.
if errors.Is(err, fs.ErrNotExist) {
return nil
}
return err
}
for _, entry := range files {
path := filepath.Join(path, entry.Name())
if w.port.PathIsWatched(path) {
continue
}
finfo, err := entry.Info()
if err != nil {
return err
}
err = w.associateFile(path, finfo, false)
if errors.Is(err, fs.ErrNotExist) {
// File may have disappeared between getting the dir listing and
// adding the port: that's okay to ignore.
continue
}
if !w.sendError(err) {
return nil
}
if !w.sendEvent(Event{Name: path, Op: Create}) {
return nil
}
}
return nil
}
func (w *fen) associateFile(path string, stat os.FileInfo, follow bool) error {
if w.isClosed() {
return ErrClosed
}
// This is primarily protecting the call to AssociatePath but it is
// important and intentional that the call to PathIsWatched is also
// protected by this mutex. Without this mutex, AssociatePath has been seen
// to error out that the path is already associated.
w.mu.Lock()
defer w.mu.Unlock()
if w.port.PathIsWatched(path) {
// Remove the old association in favor of this one. If we get ENOENT,
// then while the x/sys/unix wrapper still thought that this path was
// associated, the underlying event port did not. This call will have
// cleared up that discrepancy. The most likely cause is that the event
// has fired but we haven't processed it yet.
err := w.port.DissociatePath(path)
if err != nil && !errors.Is(err, unix.ENOENT) {
return fmt.Errorf("port.DissociatePath(%q): %w", path, err)
}
}
var events int
if !follow {
// Watch symlinks themselves rather than their targets unless this entry
// is explicitly watched.
events |= unix.FILE_NOFOLLOW
}
if true { // TODO: implement withOps()
events |= unix.FILE_MODIFIED
}
if true {
events |= unix.FILE_ATTRIB
}
err := w.port.AssociatePath(path, stat, events, stat.Mode())
if err != nil {
return fmt.Errorf("port.AssociatePath(%q): %w", path, err)
}
return nil
}
func (w *fen) dissociateFile(path string, stat os.FileInfo, unused bool) error {
if !w.port.PathIsWatched(path) {
return nil
}
err := w.port.DissociatePath(path)
if err != nil {
return fmt.Errorf("port.DissociatePath(%q): %w", path, err)
}
return nil
}
func (w *fen) WatchList() []string {
if w.isClosed() {
return nil
}
w.mu.Lock()
defer w.mu.Unlock()
entries := make([]string, 0, len(w.watches)+len(w.dirs))
for pathname := range w.dirs {
entries = append(entries, pathname)
}
for pathname := range w.watches {
entries = append(entries, pathname)
}
return entries
}
func (w *fen) xSupports(op Op) bool {
if op.Has(xUnportableOpen) || op.Has(xUnportableRead) ||
op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) {
return false
}
return true
}

583
vendor/github.com/fsnotify/fsnotify/backend_inotify.go generated vendored Normal file
View File

@@ -0,0 +1,583 @@
//go:build linux && !appengine
package fsnotify
import (
"errors"
"fmt"
"io"
"io/fs"
"os"
"path/filepath"
"strings"
"sync"
"time"
"unsafe"
"github.com/fsnotify/fsnotify/internal"
"golang.org/x/sys/unix"
)
type inotify struct {
*shared
Events chan Event
Errors chan error
// Store fd here as os.File.Read() will no longer return on close after
// calling Fd(). See: https://github.com/golang/go/issues/26439
fd int
inotifyFile *os.File
watches *watches
doneResp chan struct{} // Channel to respond to Close
// Store rename cookies in an array, with the index wrapping to 0. Almost
// all of the time what we get is a MOVED_FROM to set the cookie and the
// next event inotify sends will be MOVED_TO to read it. However, this is
// not guaranteed as described in inotify(7) and we may get other events
// between the two MOVED_* events (including other MOVED_* ones).
//
// A second issue is that moving a file outside the watched directory will
// trigger a MOVED_FROM to set the cookie, but we never see the MOVED_TO to
// read and delete it. So just storing it in a map would slowly leak memory.
//
// Doing it like this gives us a simple fast LRU-cache that won't allocate.
// Ten items should be more than enough for our purpose, and a loop over
// such a short array is faster than a map access anyway (not that it hugely
// matters since we're talking about hundreds of ns at the most, but still).
cookies [10]koekje
cookieIndex uint8
cookiesMu sync.Mutex
}
type (
watches struct {
wd map[uint32]*watch // wd → watch
path map[string]uint32 // pathname → wd
}
watch struct {
wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
path string // Watch path.
recurse bool // Recursion with ./...?
}
koekje struct {
cookie uint32
path string
}
)
func newWatches() *watches {
return &watches{
wd: make(map[uint32]*watch),
path: make(map[string]uint32),
}
}
func (w *watches) byPath(path string) *watch { return w.wd[w.path[path]] }
func (w *watches) byWd(wd uint32) *watch { return w.wd[wd] }
func (w *watches) len() int { return len(w.wd) }
func (w *watches) add(ww *watch) { w.wd[ww.wd] = ww; w.path[ww.path] = ww.wd }
func (w *watches) remove(watch *watch) { delete(w.path, watch.path); delete(w.wd, watch.wd) }
func (w *watches) removePath(path string) ([]uint32, error) {
path, recurse := recursivePath(path)
wd, ok := w.path[path]
if !ok {
return nil, fmt.Errorf("%w: %s", ErrNonExistentWatch, path)
}
watch := w.wd[wd]
if recurse && !watch.recurse {
return nil, fmt.Errorf("can't use /... with non-recursive watch %q", path)
}
delete(w.path, path)
delete(w.wd, wd)
if !watch.recurse {
return []uint32{wd}, nil
}
wds := make([]uint32, 0, 8)
wds = append(wds, wd)
for p, rwd := range w.path {
if strings.HasPrefix(p, path) {
delete(w.path, p)
delete(w.wd, rwd)
wds = append(wds, rwd)
}
}
return wds, nil
}
func (w *watches) updatePath(path string, f func(*watch) (*watch, error)) error {
var existing *watch
wd, ok := w.path[path]
if ok {
existing = w.wd[wd]
}
upd, err := f(existing)
if err != nil {
return err
}
if upd != nil {
w.wd[upd.wd] = upd
w.path[upd.path] = upd.wd
if upd.wd != wd {
delete(w.wd, wd)
}
}
return nil
}
var defaultBufferSize = 0
func newBackend(ev chan Event, errs chan error) (backend, error) {
// Need to set nonblocking mode for SetDeadline to work, otherwise blocking
// I/O operations won't terminate on close.
fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK)
if fd == -1 {
return nil, errno
}
w := &inotify{
shared: newShared(ev, errs),
Events: ev,
Errors: errs,
fd: fd,
inotifyFile: os.NewFile(uintptr(fd), ""),
watches: newWatches(),
doneResp: make(chan struct{}),
}
go w.readEvents()
return w, nil
}
func (w *inotify) Close() error {
if w.shared.close() {
return nil
}
// Causes any blocking reads to return with an error, provided the file
// still supports deadline operations.
err := w.inotifyFile.Close()
if err != nil {
return err
}
<-w.doneResp // Wait for readEvents() to finish.
return nil
}
func (w *inotify) Add(name string) error { return w.AddWith(name) }
func (w *inotify) AddWith(path string, opts ...addOpt) error {
if w.isClosed() {
return ErrClosed
}
if debug {
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n",
time.Now().Format("15:04:05.000000000"), path)
}
with := getOptions(opts...)
if !w.xSupports(with.op) {
return fmt.Errorf("%w: %s", xErrUnsupported, with.op)
}
add := func(path string, with withOpts, recurse bool) error {
var flags uint32
if with.noFollow {
flags |= unix.IN_DONT_FOLLOW
}
if with.op.Has(Create) {
flags |= unix.IN_CREATE
}
if with.op.Has(Write) {
flags |= unix.IN_MODIFY
}
if with.op.Has(Remove) {
flags |= unix.IN_DELETE | unix.IN_DELETE_SELF
}
if with.op.Has(Rename) {
flags |= unix.IN_MOVED_TO | unix.IN_MOVED_FROM | unix.IN_MOVE_SELF
}
if with.op.Has(Chmod) {
flags |= unix.IN_ATTRIB
}
if with.op.Has(xUnportableOpen) {
flags |= unix.IN_OPEN
}
if with.op.Has(xUnportableRead) {
flags |= unix.IN_ACCESS
}
if with.op.Has(xUnportableCloseWrite) {
flags |= unix.IN_CLOSE_WRITE
}
if with.op.Has(xUnportableCloseRead) {
flags |= unix.IN_CLOSE_NOWRITE
}
return w.register(path, flags, recurse)
}
w.mu.Lock()
defer w.mu.Unlock()
path, recurse := recursivePath(path)
if recurse {
return filepath.WalkDir(path, func(root string, d fs.DirEntry, err error) error {
if err != nil {
return err
}
if !d.IsDir() {
if root == path {
return fmt.Errorf("fsnotify: not a directory: %q", path)
}
return nil
}
// Send a Create event when adding new directory from a recursive
// watch; this is for "mkdir -p one/two/three". Usually all those
// directories will be created before we can set up watchers on the
// subdirectories, so only "one" would be sent as a Create event and
// not "one/two" and "one/two/three" (inotifywait -r has the same
// problem).
if with.sendCreate && root != path {
w.sendEvent(Event{Name: root, Op: Create})
}
return add(root, with, true)
})
}
return add(path, with, false)
}
func (w *inotify) register(path string, flags uint32, recurse bool) error {
return w.watches.updatePath(path, func(existing *watch) (*watch, error) {
if existing != nil {
flags |= existing.flags | unix.IN_MASK_ADD
}
wd, err := unix.InotifyAddWatch(w.fd, path, flags)
if wd == -1 {
return nil, err
}
if e, ok := w.watches.wd[uint32(wd)]; ok {
return e, nil
}
if existing == nil {
return &watch{
wd: uint32(wd),
path: path,
flags: flags,
recurse: recurse,
}, nil
}
existing.wd = uint32(wd)
existing.flags = flags
return existing, nil
})
}
func (w *inotify) Remove(name string) error {
if w.isClosed() {
return nil
}
if debug {
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n",
time.Now().Format("15:04:05.000000000"), name)
}
w.mu.Lock()
defer w.mu.Unlock()
return w.remove(filepath.Clean(name))
}
func (w *inotify) remove(name string) error {
wds, err := w.watches.removePath(name)
if err != nil {
return err
}
for _, wd := range wds {
_, err := unix.InotifyRmWatch(w.fd, wd)
if err != nil {
// TODO: Perhaps it's not helpful to return an error here in every
// case; the only two possible errors are:
//
// EBADF, which happens when w.fd is not a valid file descriptor of
// any kind.
//
// EINVAL, which is when fd is not an inotify descriptor or wd is
// not a valid watch descriptor. Watch descriptors are invalidated
// when they are removed explicitly or implicitly; explicitly by
// inotify_rm_watch, implicitly when the file they are watching is
// deleted.
return err
}
}
return nil
}
func (w *inotify) WatchList() []string {
if w.isClosed() {
return nil
}
w.mu.Lock()
defer w.mu.Unlock()
entries := make([]string, 0, w.watches.len())
for pathname := range w.watches.path {
entries = append(entries, pathname)
}
return entries
}
// readEvents reads from the inotify file descriptor, converts the
// received events into Event objects and sends them via the Events channel
func (w *inotify) readEvents() {
defer func() {
close(w.doneResp)
close(w.Errors)
close(w.Events)
}()
var buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
for {
if w.isClosed() {
return
}
n, err := w.inotifyFile.Read(buf[:])
if err != nil {
if errors.Is(err, os.ErrClosed) {
return
}
if !w.sendError(err) {
return
}
continue
}
if n < unix.SizeofInotifyEvent {
err := errors.New("notify: short read in readEvents()") // Read was too short.
if n == 0 {
err = io.EOF // If EOF is received. This should really never happen.
}
if !w.sendError(err) {
return
}
continue
}
// We don't know how many events we just read into the buffer; process
// them while the offset points to at least one whole event.
var offset uint32
for offset <= uint32(n-unix.SizeofInotifyEvent) {
// Point to the event in the buffer.
inEvent := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))
if inEvent.Mask&unix.IN_Q_OVERFLOW != 0 {
if !w.sendError(ErrEventOverflow) {
return
}
}
ev, ok := w.handleEvent(inEvent, &buf, offset)
if !ok {
return
}
if !w.sendEvent(ev) {
return
}
// Move to the next event in the buffer
offset += unix.SizeofInotifyEvent + inEvent.Len
}
}
}
func (w *inotify) handleEvent(inEvent *unix.InotifyEvent, buf *[65536]byte, offset uint32) (Event, bool) {
w.mu.Lock()
defer w.mu.Unlock()
/// If the event happened to the watched directory or the watched file, the
/// kernel doesn't append the filename to the event, but we would like to
/// always fill the "Name" field with a valid filename. We retrieve the
/// path of the watch from the "paths" map.
///
/// Can be nil if Remove() was called in another goroutine for this path
/// in between reading the events from the kernel and reading the internal
/// state. Not much we can do about it, so just skip. See #616.
watch := w.watches.byWd(uint32(inEvent.Wd))
if watch == nil {
return Event{}, true
}
var (
name = watch.path
nameLen = uint32(inEvent.Len)
)
if nameLen > 0 {
/// Point "bytes" at the first byte of the filename
bb := *buf
bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&bb[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen]
/// The filename is padded with NULL bytes. TrimRight() gets rid of those.
name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\x00")
}
if debug {
internal.Debug(name, inEvent.Mask, inEvent.Cookie)
}
if inEvent.Mask&unix.IN_IGNORED != 0 || inEvent.Mask&unix.IN_UNMOUNT != 0 {
w.watches.remove(watch)
return Event{}, true
}
// inotify will automatically remove the watch on deletes; just need
// to clean our state here.
if inEvent.Mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
w.watches.remove(watch)
}
// We can't really update the state when a watched path is moved; only
// IN_MOVE_SELF is sent and not IN_MOVED_{FROM,TO}. So remove the watch.
if inEvent.Mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF {
if watch.recurse { // Do nothing
return Event{}, true
}
err := w.remove(watch.path)
if err != nil && !errors.Is(err, ErrNonExistentWatch) {
if !w.sendError(err) {
return Event{}, false
}
}
}
/// Skip if we're watching both this path and the parent; the parent will
/// already send a delete so no need to do it twice.
if inEvent.Mask&unix.IN_DELETE_SELF != 0 {
_, ok := w.watches.path[filepath.Dir(watch.path)]
if ok {
return Event{}, true
}
}
ev := w.newEvent(name, inEvent.Mask, inEvent.Cookie)
// Need to update watch path for recurse.
if watch.recurse {
isDir := inEvent.Mask&unix.IN_ISDIR == unix.IN_ISDIR
/// New directory created: set up watch on it.
if isDir && ev.Has(Create) {
err := w.register(ev.Name, watch.flags, true)
if !w.sendError(err) {
return Event{}, false
}
// This was a directory rename, so we need to update all the
// children.
//
// TODO: this is of course pretty slow; we should use a better data
// structure for storing all of this, e.g. store children in the
// watch. I have some code for this in my kqueue refactor we can use
// in the future. For now I'm okay with this as it's not publicly
// available. Correctness first, performance second.
if ev.renamedFrom != "" {
for k, ww := range w.watches.wd {
if k == watch.wd || ww.path == ev.Name {
continue
}
if strings.HasPrefix(ww.path, ev.renamedFrom) {
ww.path = strings.Replace(ww.path, ev.renamedFrom, ev.Name, 1)
w.watches.wd[k] = ww
}
}
}
}
}
return ev, true
}
func (w *inotify) isRecursive(path string) bool {
ww := w.watches.byPath(path)
if ww == nil { // path could be a file, so also check the Dir.
ww = w.watches.byPath(filepath.Dir(path))
}
return ww != nil && ww.recurse
}
func (w *inotify) newEvent(name string, mask, cookie uint32) Event {
e := Event{Name: name}
if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO {
e.Op |= Create
}
if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE {
e.Op |= Remove
}
if mask&unix.IN_MODIFY == unix.IN_MODIFY {
e.Op |= Write
}
if mask&unix.IN_OPEN == unix.IN_OPEN {
e.Op |= xUnportableOpen
}
if mask&unix.IN_ACCESS == unix.IN_ACCESS {
e.Op |= xUnportableRead
}
if mask&unix.IN_CLOSE_WRITE == unix.IN_CLOSE_WRITE {
e.Op |= xUnportableCloseWrite
}
if mask&unix.IN_CLOSE_NOWRITE == unix.IN_CLOSE_NOWRITE {
e.Op |= xUnportableCloseRead
}
if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM {
e.Op |= Rename
}
if mask&unix.IN_ATTRIB == unix.IN_ATTRIB {
e.Op |= Chmod
}
if cookie != 0 {
if mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM {
w.cookiesMu.Lock()
w.cookies[w.cookieIndex] = koekje{cookie: cookie, path: e.Name}
w.cookieIndex++
if w.cookieIndex > 9 {
w.cookieIndex = 0
}
w.cookiesMu.Unlock()
} else if mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO {
w.cookiesMu.Lock()
var prev string
for _, c := range w.cookies {
if c.cookie == cookie {
prev = c.path
break
}
}
w.cookiesMu.Unlock()
e.renamedFrom = prev
}
}
return e
}
func (w *inotify) xSupports(op Op) bool {
return true // Supports everything.
}
func (w *inotify) state() {
w.mu.Lock()
defer w.mu.Unlock()
for wd, ww := range w.watches.wd {
fmt.Fprintf(os.Stderr, "%4d: recurse=%t %q\n", wd, ww.recurse, ww.path)
}
}
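// Illustrative sketch, not part of the vendored file: decoding a raw inotify
// mask the same way newEvent above does, simplified to the first matching op
// (newEvent accumulates a bitmask). Only golang.org/x/sys/unix is assumed;
// Linux only.
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func opName(mask uint32) string {
	switch {
	case mask&(unix.IN_CREATE|unix.IN_MOVED_TO) != 0:
		return "Create" // new file, or a file moved into the watched dir
	case mask&(unix.IN_DELETE|unix.IN_DELETE_SELF) != 0:
		return "Remove"
	case mask&unix.IN_MODIFY != 0:
		return "Write"
	case mask&(unix.IN_MOVED_FROM|unix.IN_MOVE_SELF) != 0:
		return "Rename" // old name of a move, or the watched path itself moved
	case mask&unix.IN_ATTRIB != 0:
		return "Chmod"
	}
	return "unknown"
}

func main() {
	fmt.Println(opName(unix.IN_CREATE | unix.IN_ISDIR)) // Create
}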

705
vendor/github.com/fsnotify/fsnotify/backend_kqueue.go generated vendored Normal file
View File

@@ -0,0 +1,705 @@
//go:build freebsd || openbsd || netbsd || dragonfly || darwin
package fsnotify
import (
"errors"
"fmt"
"os"
"path/filepath"
"runtime"
"sync"
"time"
"github.com/fsnotify/fsnotify/internal"
"golang.org/x/sys/unix"
)
type kqueue struct {
*shared
Events chan Event
Errors chan error
kq int // File descriptor (as returned by the kqueue() syscall).
closepipe [2]int // Pipe used for closing kq.
watches *watches
}
type (
watches struct {
mu sync.RWMutex
wd map[int]watch // wd → watch
path map[string]int // pathname → wd
byDir map[string]map[int]struct{} // dirname(path) → wd
seen map[string]struct{} // Keep track of if we know this file exists.
byUser map[string]struct{} // Watches added with Watcher.Add()
}
watch struct {
wd int
name string
linkName string // In case of links; name is the target, and this is the link.
isDir bool
dirFlags uint32
}
)
func newWatches() *watches {
return &watches{
wd: make(map[int]watch),
path: make(map[string]int),
byDir: make(map[string]map[int]struct{}),
seen: make(map[string]struct{}),
byUser: make(map[string]struct{}),
}
}
func (w *watches) listPaths(userOnly bool) []string {
w.mu.RLock()
defer w.mu.RUnlock()
if userOnly {
l := make([]string, 0, len(w.byUser))
for p := range w.byUser {
l = append(l, p)
}
return l
}
l := make([]string, 0, len(w.path))
for p := range w.path {
l = append(l, p)
}
return l
}
func (w *watches) watchesInDir(path string) []string {
w.mu.RLock()
defer w.mu.RUnlock()
l := make([]string, 0, 4)
for fd := range w.byDir[path] {
info := w.wd[fd]
if _, ok := w.byUser[info.name]; !ok {
l = append(l, info.name)
}
}
return l
}
// Mark path as added by the user.
func (w *watches) addUserWatch(path string) {
w.mu.Lock()
defer w.mu.Unlock()
w.byUser[path] = struct{}{}
}
func (w *watches) addLink(path string, fd int) {
w.mu.Lock()
defer w.mu.Unlock()
w.path[path] = fd
w.seen[path] = struct{}{}
}
func (w *watches) add(path, linkPath string, fd int, isDir bool) {
w.mu.Lock()
defer w.mu.Unlock()
w.path[path] = fd
w.wd[fd] = watch{wd: fd, name: path, linkName: linkPath, isDir: isDir}
parent := filepath.Dir(path)
byDir, ok := w.byDir[parent]
if !ok {
byDir = make(map[int]struct{}, 1)
w.byDir[parent] = byDir
}
byDir[fd] = struct{}{}
}
func (w *watches) byWd(fd int) (watch, bool) {
w.mu.RLock()
defer w.mu.RUnlock()
info, ok := w.wd[fd]
return info, ok
}
func (w *watches) byPath(path string) (watch, bool) {
w.mu.RLock()
defer w.mu.RUnlock()
info, ok := w.wd[w.path[path]]
return info, ok
}
func (w *watches) updateDirFlags(path string, flags uint32) bool {
w.mu.Lock()
defer w.mu.Unlock()
fd, ok := w.path[path]
if !ok { // Already deleted: don't re-set it here.
return false
}
info := w.wd[fd]
info.dirFlags = flags
w.wd[fd] = info
return true
}
func (w *watches) remove(fd int, path string) bool {
w.mu.Lock()
defer w.mu.Unlock()
isDir := w.wd[fd].isDir
delete(w.path, path)
delete(w.byUser, path)
parent := filepath.Dir(path)
delete(w.byDir[parent], fd)
if len(w.byDir[parent]) == 0 {
delete(w.byDir, parent)
}
delete(w.wd, fd)
delete(w.seen, path)
return isDir
}
func (w *watches) markSeen(path string, exists bool) {
w.mu.Lock()
defer w.mu.Unlock()
if exists {
w.seen[path] = struct{}{}
} else {
delete(w.seen, path)
}
}
func (w *watches) seenBefore(path string) bool {
w.mu.RLock()
defer w.mu.RUnlock()
_, ok := w.seen[path]
return ok
}
var defaultBufferSize = 0
func newBackend(ev chan Event, errs chan error) (backend, error) {
kq, closepipe, err := newKqueue()
if err != nil {
return nil, err
}
w := &kqueue{
shared: newShared(ev, errs),
Events: ev,
Errors: errs,
kq: kq,
closepipe: closepipe,
watches: newWatches(),
}
go w.readEvents()
return w, nil
}
// newKqueue creates a new kernel event queue and returns a descriptor.
//
// This registers a new event on closepipe, which will trigger an event when
// it's closed. This way we can use kevent() without timeout/polling; without
// the closepipe, it would block forever and we wouldn't be able to stop it at
// all.
func newKqueue() (kq int, closepipe [2]int, err error) {
kq, err = unix.Kqueue()
if err != nil {
return kq, closepipe, err
}
// Register the close pipe.
err = unix.Pipe(closepipe[:])
if err != nil {
unix.Close(kq)
return kq, closepipe, err
}
unix.CloseOnExec(closepipe[0])
unix.CloseOnExec(closepipe[1])
// Register changes to listen on the closepipe.
changes := make([]unix.Kevent_t, 1)
// SetKevent converts int to the platform-specific types.
unix.SetKevent(&changes[0], closepipe[0], unix.EVFILT_READ,
unix.EV_ADD|unix.EV_ENABLE|unix.EV_ONESHOT)
ok, err := unix.Kevent(kq, changes, nil, nil)
if ok == -1 {
unix.Close(kq)
unix.Close(closepipe[0])
unix.Close(closepipe[1])
return kq, closepipe, err
}
return kq, closepipe, nil
}
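// Illustrative sketch, not part of the vendored source: the closepipe trick
// above in miniature. A blocking Kevent() returns as soon as the write end of
// the registered pipe is closed, so no timeout or polling is needed
// (BSD/macOS only; error handling elided for brevity):
//
//	kq, _ := unix.Kqueue()
//	var pipe [2]int
//	_ = unix.Pipe(pipe[:])
//
//	// Watch the read end; closing the write end delivers EOF.
//	changes := make([]unix.Kevent_t, 1)
//	unix.SetKevent(&changes[0], pipe[0], unix.EVFILT_READ,
//		unix.EV_ADD|unix.EV_ENABLE|unix.EV_ONESHOT)
//	_, _ = unix.Kevent(kq, changes, nil, nil)
//
//	go unix.Close(pipe[1]) // the "quit" message, as in Close() below
//
//	events := make([]unix.Kevent_t, 1)
//	n, _ := unix.Kevent(kq, nil, events, nil) // blocks until the pipe closes
//	fmt.Println("woken up, events:", n)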
func (w *kqueue) Close() error {
if w.shared.close() {
return nil
}
pathsToRemove := w.watches.listPaths(false)
for _, name := range pathsToRemove {
w.Remove(name)
}
unix.Close(w.closepipe[1]) // Send "quit" message to readEvents
return nil
}
func (w *kqueue) Add(name string) error { return w.AddWith(name) }
func (w *kqueue) AddWith(name string, opts ...addOpt) error {
if debug {
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n",
time.Now().Format("15:04:05.000000000"), name)
}
with := getOptions(opts...)
if !w.xSupports(with.op) {
return fmt.Errorf("%w: %s", xErrUnsupported, with.op)
}
_, err := w.addWatch(name, noteAllEvents, false)
if err != nil {
return err
}
w.watches.addUserWatch(name)
return nil
}
func (w *kqueue) Remove(name string) error {
if debug {
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n",
time.Now().Format("15:04:05.000000000"), name)
}
return w.remove(name, true)
}
func (w *kqueue) remove(name string, unwatchFiles bool) error {
if w.isClosed() {
return nil
}
name = filepath.Clean(name)
info, ok := w.watches.byPath(name)
if !ok {
return fmt.Errorf("%w: %s", ErrNonExistentWatch, name)
}
err := w.register([]int{info.wd}, unix.EV_DELETE, 0)
if err != nil {
return err
}
unix.Close(info.wd)
isDir := w.watches.remove(info.wd, name)
// Find all watched paths in this directory that are not external.
if unwatchFiles && isDir {
pathsToRemove := w.watches.watchesInDir(name)
for _, name := range pathsToRemove {
// Since these are internal, not much sense in propagating error to
// the user, as that will just confuse them with an error about a
// path they did not explicitly watch themselves.
w.Remove(name)
}
}
return nil
}
func (w *kqueue) WatchList() []string {
if w.isClosed() {
return nil
}
return w.watches.listPaths(true)
}
// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE)
const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME
// addWatch adds name to the watched file set; the flags are interpreted as
// described in kevent(2).
//
// Returns the real path to the file which was added, with symlinks resolved.
func (w *kqueue) addWatch(name string, flags uint32, listDir bool) (string, error) {
if w.isClosed() {
return "", ErrClosed
}
name = filepath.Clean(name)
info, alreadyWatching := w.watches.byPath(name)
if !alreadyWatching {
fi, err := os.Lstat(name)
if err != nil {
return "", err
}
// Don't watch sockets or named pipes.
if (fi.Mode()&os.ModeSocket == os.ModeSocket) || (fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe) {
return "", nil
}
// Follow symlinks, but only for paths added with Add(), and not paths
// we're adding from internalWatch from a listdir.
if !listDir && fi.Mode()&os.ModeSymlink == os.ModeSymlink {
link, err := os.Readlink(name)
if err != nil {
return "", err
}
if !filepath.IsAbs(link) {
link = filepath.Join(filepath.Dir(name), link)
}
_, alreadyWatching = w.watches.byPath(link)
if alreadyWatching {
// Add to watches so we don't get spurious Create events later
// on when we diff the directories.
w.watches.addLink(name, 0)
return link, nil
}
info.linkName = name
name = link
fi, err = os.Lstat(name)
if err != nil {
return "", err
}
}
// Retry on EINTR; open() can return EINTR in practice on macOS.
// See #354, and Go issues 11180 and 39237.
for {
info.wd, err = unix.Open(name, openMode, 0)
if err == nil {
break
}
if errors.Is(err, unix.EINTR) {
continue
}
return "", err
}
info.isDir = fi.IsDir()
}
err := w.register([]int{info.wd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags)
if err != nil {
unix.Close(info.wd)
return "", err
}
if !alreadyWatching {
w.watches.add(name, info.linkName, info.wd, info.isDir)
}
// Watch the directory if it has not been watched before, or if it was
// watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles)
if info.isDir {
watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE &&
(!alreadyWatching || (info.dirFlags&unix.NOTE_WRITE) != unix.NOTE_WRITE)
if !w.watches.updateDirFlags(name, flags) {
return "", nil
}
if watchDir {
d := name
if info.linkName != "" {
d = info.linkName
}
if err := w.watchDirectoryFiles(d); err != nil {
return "", err
}
}
}
return name, nil
}
// readEvents reads from kqueue and converts the received kevents into
// Event values that it sends down the Events channel.
func (w *kqueue) readEvents() {
defer func() {
close(w.Events)
close(w.Errors)
_ = unix.Close(w.kq)
unix.Close(w.closepipe[0])
}()
eventBuffer := make([]unix.Kevent_t, 10)
for {
kevents, err := w.read(eventBuffer)
// EINTR is okay, the syscall was interrupted before timeout expired.
if err != nil && err != unix.EINTR {
if !w.sendError(fmt.Errorf("fsnotify.readEvents: %w", err)) {
return
}
}
for _, kevent := range kevents {
var (
wd = int(kevent.Ident)
mask = uint32(kevent.Fflags)
)
// Shut down the loop when the pipe is closed, but only after all
// other events have been processed.
if wd == w.closepipe[0] {
return
}
path, ok := w.watches.byWd(wd)
if debug {
internal.Debug(path.name, &kevent)
}
// On macOS it seems that sometimes an event with Ident=0 is
// delivered, and no other flags/information beyond that, even
// though we never saw such a file descriptor. For example in
// TestWatchSymlink/277 (usually at the end, but sometimes sooner):
//
// fmt.Printf("READ: %2d %#v\n", kevent.Ident, kevent)
// unix.Kevent_t{Ident:0x2a, Filter:-4, Flags:0x25, Fflags:0x2, Data:0, Udata:(*uint8)(nil)}
// unix.Kevent_t{Ident:0x0, Filter:-4, Flags:0x25, Fflags:0x2, Data:0, Udata:(*uint8)(nil)}
//
// The first is a normal event, the second with Ident 0. No error
// flag, no data, no ... nothing.
//
// I read a bit through bsd/kern_event.c from the xnu source, but I
// don't really see an obvious location where this is triggered;
// it doesn't seem intentional, but idk...
//
// Technically fd 0 is a valid descriptor, so only skip it if
// there's no path, and if we're on macOS.
if !ok && kevent.Ident == 0 && runtime.GOOS == "darwin" {
continue
}
event := w.newEvent(path.name, path.linkName, mask)
if event.Has(Rename) || event.Has(Remove) {
w.remove(event.Name, false)
w.watches.markSeen(event.Name, false)
}
if path.isDir && event.Has(Write) && !event.Has(Remove) {
w.dirChange(event.Name)
} else if !w.sendEvent(event) {
return
}
if event.Has(Remove) {
// Look for a file that may have overwritten this; for example,
// mv f1 f2 will delete f2, then create f2.
if path.isDir {
fileDir := filepath.Clean(event.Name)
_, found := w.watches.byPath(fileDir)
if found {
// TODO: this branch is never triggered in any test.
// Added in d6220df (2012).
// isDir check added in 8611c35 (2016): https://github.com/fsnotify/fsnotify/pull/111
//
// I don't really get how this can be triggered either.
// And it wasn't triggered in the patch that added it,
// either.
//
// Original also had a comment:
// make sure the directory exists before we watch for
// changes. When we do a recursive watch and perform
// rm -rf, the parent directory might have gone
// missing, ignore the missing directory and let the
// upcoming delete event remove the watch from the
// parent directory.
err := w.dirChange(fileDir)
if !w.sendError(err) {
return
}
}
} else {
path := filepath.Clean(event.Name)
if fi, err := os.Lstat(path); err == nil {
err := w.sendCreateIfNew(path, fi)
if !w.sendError(err) {
return
}
}
}
}
}
}
}
// newEvent returns a platform-independent Event based on kqueue Fflags.
func (w *kqueue) newEvent(name, linkName string, mask uint32) Event {
e := Event{Name: name}
if linkName != "" {
// If the user watched "/path/link" then emit events as "/path/link"
// rather than "/path/target".
e.Name = linkName
}
if mask&unix.NOTE_DELETE == unix.NOTE_DELETE {
e.Op |= Remove
}
if mask&unix.NOTE_WRITE == unix.NOTE_WRITE {
e.Op |= Write
}
if mask&unix.NOTE_RENAME == unix.NOTE_RENAME {
e.Op |= Rename
}
if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB {
e.Op |= Chmod
}
// No point sending a write and delete event at the same time: if it's gone,
// then it's gone.
if e.Op.Has(Write) && e.Op.Has(Remove) {
e.Op &^= Write
}
return e
}
// watchDirectoryFiles watches all files in a directory, to mimic inotify when
// adding a watch on a directory.
func (w *kqueue) watchDirectoryFiles(dirPath string) error {
files, err := os.ReadDir(dirPath)
if err != nil {
return err
}
for _, f := range files {
path := filepath.Join(dirPath, f.Name())
fi, err := f.Info()
if err != nil {
return fmt.Errorf("%q: %w", path, err)
}
cleanPath, err := w.internalWatch(path, fi)
if err != nil {
// No permission to read the file; that's not a problem: just skip.
// But do add it to w.fileExists to prevent it from being picked up
// as a "new" file later (it still shows up in the directory
// listing).
switch {
case errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM):
cleanPath = filepath.Clean(path)
default:
return fmt.Errorf("%q: %w", path, err)
}
}
w.watches.markSeen(cleanPath, true)
}
return nil
}
// Search the directory for new files and send an event for them.
//
// This functionality is to have the BSD watcher match inotify, which sends
// a create event for files created in a watched directory.
func (w *kqueue) dirChange(dir string) error {
files, err := os.ReadDir(dir)
if err != nil {
// Directory no longer exists: we can ignore this safely. kqueue will
// still give us the correct events.
if errors.Is(err, os.ErrNotExist) {
return nil
}
return fmt.Errorf("fsnotify.dirChange %q: %w", dir, err)
}
for _, f := range files {
fi, err := f.Info()
if err != nil {
if errors.Is(err, os.ErrNotExist) {
return nil
}
return fmt.Errorf("fsnotify.dirChange: %w", err)
}
err = w.sendCreateIfNew(filepath.Join(dir, fi.Name()), fi)
if err != nil {
// Don't need to send an error if this file isn't readable.
if errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM) || errors.Is(err, os.ErrNotExist) {
return nil
}
return fmt.Errorf("fsnotify.dirChange: %w", err)
}
}
return nil
}
// Send a create event if the file isn't already being tracked, and start
// watching this file.
func (w *kqueue) sendCreateIfNew(path string, fi os.FileInfo) error {
if !w.watches.seenBefore(path) {
if !w.sendEvent(Event{Name: path, Op: Create}) {
return nil
}
}
// Like watchDirectoryFiles, but without doing another ReadDir.
path, err := w.internalWatch(path, fi)
if err != nil {
return err
}
w.watches.markSeen(path, true)
return nil
}
func (w *kqueue) internalWatch(name string, fi os.FileInfo) (string, error) {
if fi.IsDir() {
// mimic Linux providing delete events for subdirectories, but preserve
// the flags used if currently watching subdirectory
info, _ := w.watches.byPath(name)
return w.addWatch(name, info.dirFlags|unix.NOTE_DELETE|unix.NOTE_RENAME, true)
}
// Watch file to mimic Linux inotify.
return w.addWatch(name, noteAllEvents, true)
}
// Register events with the queue.
func (w *kqueue) register(fds []int, flags int, fflags uint32) error {
changes := make([]unix.Kevent_t, len(fds))
for i, fd := range fds {
// SetKevent converts int to the platform-specific types.
unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags)
changes[i].Fflags = fflags
}
// Register the events.
success, err := unix.Kevent(w.kq, changes, nil, nil)
if success == -1 {
return err
}
return nil
}
// read retrieves pending events, or waits until an event occurs.
func (w *kqueue) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) {
n, err := unix.Kevent(w.kq, nil, events, nil)
if err != nil {
return nil, err
}
return events[0:n], nil
}
func (w *kqueue) xSupports(op Op) bool {
//if runtime.GOOS == "freebsd" {
// return true // Supports everything.
//}
if op.Has(xUnportableOpen) || op.Has(xUnportableRead) ||
op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) {
return false
}
return true
}
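// Illustrative sketch, not part of the vendored file: kqueue opens one file
// descriptor per watched file (see addWatch above), so watching large trees
// can exhaust RLIMIT_NOFILE. Raising the soft limit to the hard limit:
package main

import "golang.org/x/sys/unix"

func main() {
	var l unix.Rlimit
	if err := unix.Getrlimit(unix.RLIMIT_NOFILE, &l); err == nil && l.Cur < l.Max {
		l.Cur = l.Max
		_ = unix.Setrlimit(unix.RLIMIT_NOFILE, &l) // may still be capped by kern.maxfilesperproc
	}
}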

22
vendor/github.com/fsnotify/fsnotify/backend_other.go generated vendored Normal file
View File

@@ -0,0 +1,22 @@
//go:build appengine || (!darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows)
package fsnotify
import "errors"
type other struct {
Events chan Event
Errors chan error
}
var defaultBufferSize = 0
func newBackend(ev chan Event, errs chan error) (backend, error) {
return nil, errors.New("fsnotify not supported on the current platform")
}
func (w *other) Close() error { return nil }
func (w *other) WatchList() []string { return nil }
func (w *other) Add(name string) error { return nil }
func (w *other) AddWith(name string, opts ...addOpt) error { return nil }
func (w *other) Remove(name string) error { return nil }
func (w *other) xSupports(op Op) bool { return false }
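// Illustrative sketch, not part of the vendored source: this stub (like every
// platform backend in this package) is expected to satisfy the unexported
// backend interface declared in fsnotify.go. The usual Go compile-time
// assertion for that would be:
//
//	var _ backend = (*other)(nil)
//
// which fails to build if the stub ever drifts from the interface.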

680
vendor/github.com/fsnotify/fsnotify/backend_windows.go generated vendored Normal file
View File

@@ -0,0 +1,680 @@
//go:build windows
// Windows backend based on ReadDirectoryChangesW()
//
// https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-readdirectorychangesw
package fsnotify
import (
"errors"
"fmt"
"os"
"path/filepath"
"reflect"
"runtime"
"strings"
"sync"
"time"
"unsafe"
"github.com/fsnotify/fsnotify/internal"
"golang.org/x/sys/windows"
)
type readDirChangesW struct {
Events chan Event
Errors chan error
port windows.Handle // Handle to completion port
input chan *input // Inputs to the reader are sent on this channel
done chan chan<- error
mu sync.Mutex // Protects access to watches, closed
watches watchMap // Map of watches (key: i-number)
closed bool // Set to true when Close() is first called
}
var defaultBufferSize = 50
func newBackend(ev chan Event, errs chan error) (backend, error) {
port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0)
if err != nil {
return nil, os.NewSyscallError("CreateIoCompletionPort", err)
}
w := &readDirChangesW{
Events: ev,
Errors: errs,
port: port,
watches: make(watchMap),
input: make(chan *input, 1),
done: make(chan chan<- error, 1),
}
go w.readEvents()
return w, nil
}
func (w *readDirChangesW) isClosed() bool {
w.mu.Lock()
defer w.mu.Unlock()
return w.closed
}
func (w *readDirChangesW) sendEvent(name, renamedFrom string, mask uint64) bool {
if mask == 0 {
return false
}
event := w.newEvent(name, uint32(mask))
event.renamedFrom = renamedFrom
select {
case ch := <-w.done:
w.done <- ch
case w.Events <- event:
}
return true
}
// Returns true if the error was sent, or false if the watcher is closed.
func (w *readDirChangesW) sendError(err error) bool {
if err == nil {
return true
}
select {
case <-w.done:
return false
case w.Errors <- err:
return true
}
}
func (w *readDirChangesW) Close() error {
if w.isClosed() {
return nil
}
w.mu.Lock()
w.closed = true
w.mu.Unlock()
// Send "done" message to the reader goroutine
ch := make(chan error)
w.done <- ch
if err := w.wakeupReader(); err != nil {
return err
}
return <-ch
}
func (w *readDirChangesW) Add(name string) error { return w.AddWith(name) }
func (w *readDirChangesW) AddWith(name string, opts ...addOpt) error {
if w.isClosed() {
return ErrClosed
}
if debug {
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n",
time.Now().Format("15:04:05.000000000"), filepath.ToSlash(name))
}
with := getOptions(opts...)
if !w.xSupports(with.op) {
return fmt.Errorf("%w: %s", xErrUnsupported, with.op)
}
if with.bufsize < 4096 {
return fmt.Errorf("fsnotify.WithBufferSize: buffer size cannot be smaller than 4096 bytes")
}
in := &input{
op: opAddWatch,
path: filepath.Clean(name),
flags: sysFSALLEVENTS,
reply: make(chan error),
bufsize: with.bufsize,
}
w.input <- in
if err := w.wakeupReader(); err != nil {
return err
}
return <-in.reply
}
func (w *readDirChangesW) Remove(name string) error {
if w.isClosed() {
return nil
}
if debug {
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n",
time.Now().Format("15:04:05.000000000"), filepath.ToSlash(name))
}
in := &input{
op: opRemoveWatch,
path: filepath.Clean(name),
reply: make(chan error),
}
w.input <- in
if err := w.wakeupReader(); err != nil {
return err
}
return <-in.reply
}
func (w *readDirChangesW) WatchList() []string {
if w.isClosed() {
return nil
}
w.mu.Lock()
defer w.mu.Unlock()
entries := make([]string, 0, len(w.watches))
for _, entry := range w.watches {
for _, watchEntry := range entry {
for name := range watchEntry.names {
entries = append(entries, filepath.Join(watchEntry.path, name))
}
// the directory itself is being watched
if watchEntry.mask != 0 {
entries = append(entries, watchEntry.path)
}
}
}
return entries
}
// These options are from the old golang.org/x/exp/winfsnotify, where you could
// add various options to the watch. This has long since been removed.
//
// The "sys" in the name is misleading as they're not part of any "system".
//
// This should all be removed at some point, and just use windows.FILE_NOTIFY_*
const (
sysFSALLEVENTS = 0xfff
sysFSCREATE = 0x100
sysFSDELETE = 0x200
sysFSDELETESELF = 0x400
sysFSMODIFY = 0x2
sysFSMOVE = 0xc0
sysFSMOVEDFROM = 0x40
sysFSMOVEDTO = 0x80
sysFSMOVESELF = 0x800
sysFSIGNORED = 0x8000
)
func (w *readDirChangesW) newEvent(name string, mask uint32) Event {
e := Event{Name: name}
if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO {
e.Op |= Create
}
if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF {
e.Op |= Remove
}
if mask&sysFSMODIFY == sysFSMODIFY {
e.Op |= Write
}
if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM {
e.Op |= Rename
}
return e
}
const (
opAddWatch = iota
opRemoveWatch
)
const (
provisional uint64 = 1 << (32 + iota)
)
type input struct {
op int
path string
flags uint32
bufsize int
reply chan error
}
type inode struct {
handle windows.Handle
volume uint32
index uint64
}
type watch struct {
ov windows.Overlapped
ino *inode // i-number
recurse bool // Recursive watch?
path string // Directory path
mask uint64 // Directory itself is being watched with these notify flags
names map[string]uint64 // Map of names being watched and their notify flags
rename string // Remembers the old name while renaming a file
buf []byte // buffer, allocated later
}
type (
indexMap map[uint64]*watch
watchMap map[uint32]indexMap
)
func (w *readDirChangesW) wakeupReader() error {
err := windows.PostQueuedCompletionStatus(w.port, 0, 0, nil)
if err != nil {
return os.NewSyscallError("PostQueuedCompletionStatus", err)
}
return nil
}
func (w *readDirChangesW) getDir(pathname string) (dir string, err error) {
attr, err := windows.GetFileAttributes(windows.StringToUTF16Ptr(pathname))
if err != nil {
return "", os.NewSyscallError("GetFileAttributes", err)
}
if attr&windows.FILE_ATTRIBUTE_DIRECTORY != 0 {
dir = pathname
} else {
dir, _ = filepath.Split(pathname)
dir = filepath.Clean(dir)
}
return
}
func (w *readDirChangesW) getIno(path string) (ino *inode, err error) {
h, err := windows.CreateFile(windows.StringToUTF16Ptr(path),
windows.FILE_LIST_DIRECTORY,
windows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE|windows.FILE_SHARE_DELETE,
nil, windows.OPEN_EXISTING,
windows.FILE_FLAG_BACKUP_SEMANTICS|windows.FILE_FLAG_OVERLAPPED, 0)
if err != nil {
return nil, os.NewSyscallError("CreateFile", err)
}
var fi windows.ByHandleFileInformation
err = windows.GetFileInformationByHandle(h, &fi)
if err != nil {
windows.CloseHandle(h)
return nil, os.NewSyscallError("GetFileInformationByHandle", err)
}
ino = &inode{
handle: h,
volume: fi.VolumeSerialNumber,
index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow),
}
return ino, nil
}
// Must run within the I/O thread.
func (m watchMap) get(ino *inode) *watch {
if i := m[ino.volume]; i != nil {
return i[ino.index]
}
return nil
}
// Must run within the I/O thread.
func (m watchMap) set(ino *inode, watch *watch) {
i := m[ino.volume]
if i == nil {
i = make(indexMap)
m[ino.volume] = i
}
i[ino.index] = watch
}
// Must run within the I/O thread.
func (w *readDirChangesW) addWatch(pathname string, flags uint64, bufsize int) error {
pathname, recurse := recursivePath(pathname)
dir, err := w.getDir(pathname)
if err != nil {
return err
}
ino, err := w.getIno(dir)
if err != nil {
return err
}
w.mu.Lock()
watchEntry := w.watches.get(ino)
w.mu.Unlock()
if watchEntry == nil {
_, err := windows.CreateIoCompletionPort(ino.handle, w.port, 0, 0)
if err != nil {
windows.CloseHandle(ino.handle)
return os.NewSyscallError("CreateIoCompletionPort", err)
}
watchEntry = &watch{
ino: ino,
path: dir,
names: make(map[string]uint64),
recurse: recurse,
buf: make([]byte, bufsize),
}
w.mu.Lock()
w.watches.set(ino, watchEntry)
w.mu.Unlock()
flags |= provisional
} else {
windows.CloseHandle(ino.handle)
}
if pathname == dir {
watchEntry.mask |= flags
} else {
watchEntry.names[filepath.Base(pathname)] |= flags
}
err = w.startRead(watchEntry)
if err != nil {
return err
}
if pathname == dir {
watchEntry.mask &= ^provisional
} else {
watchEntry.names[filepath.Base(pathname)] &= ^provisional
}
return nil
}
// Must run within the I/O thread.
func (w *readDirChangesW) remWatch(pathname string) error {
pathname, recurse := recursivePath(pathname)
dir, err := w.getDir(pathname)
if err != nil {
return err
}
ino, err := w.getIno(dir)
if err != nil {
return err
}
w.mu.Lock()
watch := w.watches.get(ino)
w.mu.Unlock()
// Check for a nil watch before dereferencing it, and close the handle on
// every return path.
if watch == nil {
windows.CloseHandle(ino.handle)
return fmt.Errorf("%w: %s", ErrNonExistentWatch, pathname)
}
if recurse && !watch.recurse {
windows.CloseHandle(ino.handle)
return fmt.Errorf("can't use \\... with non-recursive watch %q", pathname)
}
err = windows.CloseHandle(ino.handle)
if err != nil {
w.sendError(os.NewSyscallError("CloseHandle", err))
}
if pathname == dir {
w.sendEvent(watch.path, "", watch.mask&sysFSIGNORED)
watch.mask = 0
} else {
name := filepath.Base(pathname)
w.sendEvent(filepath.Join(watch.path, name), "", watch.names[name]&sysFSIGNORED)
delete(watch.names, name)
}
return w.startRead(watch)
}
// Must run within the I/O thread.
func (w *readDirChangesW) deleteWatch(watch *watch) {
for name, mask := range watch.names {
if mask&provisional == 0 {
w.sendEvent(filepath.Join(watch.path, name), "", mask&sysFSIGNORED)
}
delete(watch.names, name)
}
if watch.mask != 0 {
if watch.mask&provisional == 0 {
w.sendEvent(watch.path, "", watch.mask&sysFSIGNORED)
}
watch.mask = 0
}
}
// Must run within the I/O thread.
func (w *readDirChangesW) startRead(watch *watch) error {
err := windows.CancelIo(watch.ino.handle)
if err != nil {
w.sendError(os.NewSyscallError("CancelIo", err))
w.deleteWatch(watch)
}
mask := w.toWindowsFlags(watch.mask)
for _, m := range watch.names {
mask |= w.toWindowsFlags(m)
}
if mask == 0 {
err := windows.CloseHandle(watch.ino.handle)
if err != nil {
w.sendError(os.NewSyscallError("CloseHandle", err))
}
w.mu.Lock()
delete(w.watches[watch.ino.volume], watch.ino.index)
w.mu.Unlock()
return nil
}
// We need to pass the array, rather than the slice.
hdr := (*reflect.SliceHeader)(unsafe.Pointer(&watch.buf))
rdErr := windows.ReadDirectoryChanges(watch.ino.handle,
(*byte)(unsafe.Pointer(hdr.Data)), uint32(hdr.Len),
watch.recurse, mask, nil, &watch.ov, 0)
if rdErr != nil {
err := os.NewSyscallError("ReadDirectoryChanges", rdErr)
if rdErr == windows.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
// Watched directory was probably removed
w.sendEvent(watch.path, "", watch.mask&sysFSDELETESELF)
err = nil
}
w.deleteWatch(watch)
w.startRead(watch)
return err
}
return nil
}
// readEvents reads from the I/O completion port, converts the
// received events into Event objects and sends them via the Events channel.
// Entry point to the I/O thread.
func (w *readDirChangesW) readEvents() {
var (
n uint32
key uintptr
ov *windows.Overlapped
)
runtime.LockOSThread()
for {
// This error is handled after the watch == nil check below.
qErr := windows.GetQueuedCompletionStatus(w.port, &n, &key, &ov, windows.INFINITE)
watch := (*watch)(unsafe.Pointer(ov))
if watch == nil {
select {
case ch := <-w.done:
w.mu.Lock()
var indexes []indexMap
for _, index := range w.watches {
indexes = append(indexes, index)
}
w.mu.Unlock()
for _, index := range indexes {
for _, watch := range index {
w.deleteWatch(watch)
w.startRead(watch)
}
}
err := windows.CloseHandle(w.port)
if err != nil {
err = os.NewSyscallError("CloseHandle", err)
}
close(w.Events)
close(w.Errors)
ch <- err
return
case in := <-w.input:
switch in.op {
case opAddWatch:
in.reply <- w.addWatch(in.path, uint64(in.flags), in.bufsize)
case opRemoveWatch:
in.reply <- w.remWatch(in.path)
}
default:
}
continue
}
switch qErr {
case nil:
// No error
case windows.ERROR_MORE_DATA:
if watch == nil {
w.sendError(errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer"))
} else {
// The i/o succeeded but the buffer is full.
// In theory we should be building up a full packet.
// In practice we can get away with just carrying on.
n = uint32(unsafe.Sizeof(watch.buf))
}
case windows.ERROR_ACCESS_DENIED:
// Watched directory was probably removed
w.sendEvent(watch.path, "", watch.mask&sysFSDELETESELF)
w.deleteWatch(watch)
w.startRead(watch)
continue
case windows.ERROR_OPERATION_ABORTED:
// CancelIo was called on this handle
continue
default:
w.sendError(os.NewSyscallError("GetQueuedCompletionStatus", qErr))
continue
}
var offset uint32
for {
if n == 0 {
w.sendError(ErrEventOverflow)
break
}
// Point "raw" to the event in the buffer
raw := (*windows.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))
// Create a buf that is the size of the path name
size := int(raw.FileNameLength / 2)
var buf []uint16
// TODO: Use unsafe.Slice in Go 1.17; https://stackoverflow.com/questions/51187973
sh := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
sh.Data = uintptr(unsafe.Pointer(&raw.FileName))
sh.Len = size
sh.Cap = size
name := windows.UTF16ToString(buf)
fullname := filepath.Join(watch.path, name)
if debug {
internal.Debug(fullname, raw.Action)
}
var mask uint64
switch raw.Action {
case windows.FILE_ACTION_REMOVED:
mask = sysFSDELETESELF
case windows.FILE_ACTION_MODIFIED:
mask = sysFSMODIFY
case windows.FILE_ACTION_RENAMED_OLD_NAME:
watch.rename = name
case windows.FILE_ACTION_RENAMED_NEW_NAME:
// Update saved path of all sub-watches.
old := filepath.Join(watch.path, watch.rename)
w.mu.Lock()
for _, watchMap := range w.watches {
for _, ww := range watchMap {
if strings.HasPrefix(ww.path, old) {
ww.path = filepath.Join(fullname, strings.TrimPrefix(ww.path, old))
}
}
}
w.mu.Unlock()
if watch.names[watch.rename] != 0 {
watch.names[name] |= watch.names[watch.rename]
delete(watch.names, watch.rename)
mask = sysFSMOVESELF
}
}
if raw.Action != windows.FILE_ACTION_RENAMED_NEW_NAME {
w.sendEvent(fullname, "", watch.names[name]&mask)
}
if raw.Action == windows.FILE_ACTION_REMOVED {
w.sendEvent(fullname, "", watch.names[name]&sysFSIGNORED)
delete(watch.names, name)
}
if watch.rename != "" && raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME {
w.sendEvent(fullname, filepath.Join(watch.path, watch.rename), watch.mask&w.toFSnotifyFlags(raw.Action))
} else {
w.sendEvent(fullname, "", watch.mask&w.toFSnotifyFlags(raw.Action))
}
if raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME {
w.sendEvent(filepath.Join(watch.path, watch.rename), "", watch.names[name]&mask)
}
// Move to the next event in the buffer
if raw.NextEntryOffset == 0 {
break
}
offset += raw.NextEntryOffset
// Error!
if offset >= n {
//lint:ignore ST1005 Windows should be capitalized
w.sendError(errors.New("Windows system assumed buffer larger than it is, events have likely been missed"))
break
}
}
if err := w.startRead(watch); err != nil {
w.sendError(err)
}
}
}
func (w *readDirChangesW) toWindowsFlags(mask uint64) uint32 {
var m uint32
if mask&sysFSMODIFY != 0 {
m |= windows.FILE_NOTIFY_CHANGE_LAST_WRITE
}
if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 {
m |= windows.FILE_NOTIFY_CHANGE_FILE_NAME | windows.FILE_NOTIFY_CHANGE_DIR_NAME
}
return m
}
func (w *readDirChangesW) toFSnotifyFlags(action uint32) uint64 {
switch action {
case windows.FILE_ACTION_ADDED:
return sysFSCREATE
case windows.FILE_ACTION_REMOVED:
return sysFSDELETE
case windows.FILE_ACTION_MODIFIED:
return sysFSMODIFY
case windows.FILE_ACTION_RENAMED_OLD_NAME:
return sysFSMOVEDFROM
case windows.FILE_ACTION_RENAMED_NEW_NAME:
return sysFSMOVEDTO
}
return 0
}
func (w *readDirChangesW) xSupports(op Op) bool {
if op.Has(xUnportableOpen) || op.Has(xUnportableRead) ||
op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) {
return false
}
return true
}

496
vendor/github.com/fsnotify/fsnotify/fsnotify.go generated vendored Normal file
View File

@@ -0,0 +1,496 @@
// Package fsnotify provides a cross-platform interface for file system
// notifications.
//
// Currently supported systems:
//
// - Linux via inotify
// - BSD, macOS via kqueue
// - Windows via ReadDirectoryChangesW
// - illumos via FEN
//
// # FSNOTIFY_DEBUG
//
// Set the FSNOTIFY_DEBUG environment variable to "1" to print debug messages to
// stderr. This can be useful to track down some problems, especially in cases
// where fsnotify is used as an indirect dependency.
//
// Every event will be printed as soon as there's something useful to print,
// with as little processing from fsnotify.
//
// Example output:
//
// FSNOTIFY_DEBUG: 11:34:23.633087586 256:IN_CREATE → "/tmp/file-1"
// FSNOTIFY_DEBUG: 11:34:23.633202319 4:IN_ATTRIB → "/tmp/file-1"
// FSNOTIFY_DEBUG: 11:34:28.989728764 512:IN_DELETE → "/tmp/file-1"
package fsnotify
import (
"errors"
"fmt"
"os"
"path/filepath"
"strings"
)
// Watcher watches a set of paths, delivering events on a channel.
//
// A watcher should not be copied (e.g. pass it by pointer, rather than by
// value).
//
// # Linux notes
//
// When a file is removed a Remove event won't be emitted until all file
// descriptors are closed, and deletes will always emit a Chmod. For example:
//
// fp := os.Open("file")
// os.Remove("file") // Triggers Chmod
// fp.Close() // Triggers Remove
//
// This is the event that inotify sends, so not much can be changed about this.
//
// The fs.inotify.max_user_watches sysctl variable specifies the upper limit
// for the number of watches per user, and fs.inotify.max_user_instances
// specifies the maximum number of inotify instances per user. Every Watcher you
// create is an "instance", and every path you add is a "watch".
//
// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and
// /proc/sys/fs/inotify/max_user_instances
//
// To increase them you can use sysctl or write the value to the /proc file:
//
// # Default values on Linux 5.18
// sysctl fs.inotify.max_user_watches=124983
// sysctl fs.inotify.max_user_instances=128
//
// To make the changes persist on reboot edit /etc/sysctl.conf or
// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check
// your distro's documentation):
//
// fs.inotify.max_user_watches=124983
// fs.inotify.max_user_instances=128
//
// Reaching the limit will result in a "no space left on device" or "too many open
// files" error.
//
// # kqueue notes (macOS, BSD)
//
// kqueue requires opening a file descriptor for every file that's being watched;
// so if you're watching a directory with five files then that's six file
// descriptors. You will run into your system's "max open files" limit faster on
// these platforms.
//
// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to
// control the maximum number of open files, as well as /etc/login.conf on BSD
// systems.
//
// # Windows notes
//
// Paths can be added as "C:\\path\\to\\dir", but forward slashes
// ("C:/path/to/dir") will also work.
//
// When a watched directory is removed it will always send an event for the
// directory itself, but may not send events for all files in that directory.
// Sometimes it will send events for all files, sometimes it will send no
// events, and often only for some files.
//
// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest
// value that is guaranteed to work with SMB filesystems. If you have many
// events in quick succession this may not be enough, and you will have to use
// [WithBufferSize] to increase the value.
type Watcher struct {
b backend
// Events sends the filesystem change events.
//
// fsnotify can send the following events; a "path" here can refer to a
// file, directory, symbolic link, or special file like a FIFO.
//
// fsnotify.Create A new path was created; this may be followed by one
// or more Write events if data also gets written to a
// file.
//
// fsnotify.Remove A path was removed.
//
// fsnotify.Rename A path was renamed. A rename is always sent with the
// old path as Event.Name, and a Create event will be
// sent with the new name. Renames are only sent for
// paths that are currently watched; e.g. moving an
// unmonitored file into a monitored directory will
// show up as just a Create. Similarly, renaming a file
// to outside a monitored directory will show up as
// only a Rename.
//
// fsnotify.Write A file or named pipe was written to. A Truncate will
// also trigger a Write. A single "write action"
// initiated by the user may show up as one or multiple
// writes, depending on when the system syncs things to
// disk. For example when compiling a large Go program
// you may get hundreds of Write events, and you may
// want to wait until you've stopped receiving them
// (see the dedup example in cmd/fsnotify).
//
// Some systems may send Write event for directories
// when the directory content changes.
//
// fsnotify.Chmod Attributes were changed. On Linux this is also sent
// when a file is removed (or more accurately, when a
// link to an inode is removed). On kqueue it's sent
// when a file is truncated. On Windows it's never
// sent.
Events chan Event
// Errors sends any errors.
Errors chan error
}
// Event represents a file system notification.
type Event struct {
// Path to the file or directory.
//
// Paths are relative to the input; for example with Add("dir") the Name
// will be set to "dir/file" if you create that file, but if you use
// Add("/path/to/dir") it will be "/path/to/dir/file".
Name string
// File operation that triggered the event.
//
// This is a bitmask and some systems may send multiple operations at once.
// Use the Event.Has() method instead of comparing with ==.
Op Op
// Create events will have this set to the old path if it's a rename. This
// only works when both the source and destination are watched. It's not
// reliable when watching individual files, only directories.
//
// For example "mv /tmp/file /tmp/rename" will emit:
//
// Event{Op: Rename, Name: "/tmp/file"}
// Event{Op: Create, Name: "/tmp/rename", RenamedFrom: "/tmp/file"}
renamedFrom string
}
// Op describes a set of file operations.
type Op uint32
// The operations fsnotify can trigger; see the documentation on [Watcher] for a
// full description, and check them with [Event.Has].
const (
// A new pathname was created.
Create Op = 1 << iota
// The pathname was written to; this does *not* mean the write has finished,
// and a write can be followed by more writes.
Write
// The path was removed; any watches on it will be removed. Some "remove"
// operations may trigger a Rename if the file is actually moved (for
// example "remove to trash" is often a rename).
Remove
// The path was renamed to something else; any watches on it will be
// removed.
Rename
// File attributes were changed.
//
// It's generally not recommended to take action on this event, as it may
// get triggered very frequently by some software. For example, Spotlight
// indexing on macOS, anti-virus software, backup software, etc.
Chmod
// File descriptor was opened.
//
// Only works on Linux and FreeBSD.
xUnportableOpen
// File was read from.
//
// Only works on Linux and FreeBSD.
xUnportableRead
// File opened for writing was closed.
//
// Only works on Linux and FreeBSD.
//
// The advantage of using this over Write is that it's more reliable than
// waiting for Write events to stop. It's also faster (if you're not
// listening to Write events): copying a file of a few GB can easily
// generate tens of thousands of Write events in a short span of time.
xUnportableCloseWrite
// File opened for reading was closed.
//
// Only works on Linux and FreeBSD.
xUnportableCloseRead
)
var (
// ErrNonExistentWatch is used when Remove() is called on a path that's not
// added.
ErrNonExistentWatch = errors.New("fsnotify: can't remove non-existent watch")
// ErrClosed is used when trying to operate on a closed Watcher.
ErrClosed = errors.New("fsnotify: watcher already closed")
// ErrEventOverflow is reported from the Errors channel when there are too
// many events:
//
// - inotify: inotify returns IN_Q_OVERFLOW because there are too
// many queued events (the fs.inotify.max_queued_events
// sysctl can be used to increase this).
// - windows: The buffer size is too small; WithBufferSize() can be used to increase it.
// - kqueue, fen: Not used.
ErrEventOverflow = errors.New("fsnotify: queue or buffer overflow")
// ErrUnsupported is returned by AddWith() when WithOps() specified an
// Unportable event that's not supported on this platform.
//lint:ignore ST1012 not relevant
xErrUnsupported = errors.New("fsnotify: not supported with this backend")
)
// NewWatcher creates a new Watcher.
func NewWatcher() (*Watcher, error) {
ev, errs := make(chan Event, defaultBufferSize), make(chan error)
b, err := newBackend(ev, errs)
if err != nil {
return nil, err
}
return &Watcher{b: b, Events: ev, Errors: errs}, nil
}
// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events
// channel.
//
// The main use case for this is situations with a very large number of events
// where the kernel buffer size can't be increased (e.g. due to lack of
// permissions). An unbuffered Watcher will perform better for almost all use
// cases, and whenever possible you will be better off increasing the kernel
// buffers instead of adding a large userspace buffer.
func NewBufferedWatcher(sz uint) (*Watcher, error) {
ev, errs := make(chan Event, sz), make(chan error)
b, err := newBackend(ev, errs)
if err != nil {
return nil, err
}
return &Watcher{b: b, Events: ev, Errors: errs}, nil
}
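// Illustrative sketch, not part of the vendored file: typical use of the
// public API, mirroring the upstream README example (error handling
// shortened):
//
//	w, err := fsnotify.NewWatcher()
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer w.Close()
//
//	go func() {
//		for {
//			select {
//			case ev, ok := <-w.Events:
//				if !ok {
//					return
//				}
//				log.Println("event:", ev)
//			case err, ok := <-w.Errors:
//				if !ok {
//					return
//				}
//				log.Println("error:", err)
//			}
//		}
//	}()
//
//	if err := w.Add("/tmp"); err != nil {
//		log.Fatal(err)
//	}
//	<-make(chan struct{}) // block forever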
// Add starts monitoring the path for changes.
//
// A path can only be watched once; watching it more than once is a no-op and will
// not return an error. Paths that do not yet exist on the filesystem cannot be
// watched.
//
// A watch will be automatically removed if the watched path is deleted or
// renamed. The exception is the Windows backend, which doesn't remove the
// watcher on renames.
//
// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special
// filesystems (/proc, /sys, etc.) generally don't work.
//
// Returns [ErrClosed] if [Watcher.Close] was called.
//
// See [Watcher.AddWith] for a version that allows adding options.
//
// # Watching directories
//
// All files in a directory are monitored, including new files that are created
// after the watcher is started. Subdirectories are not watched (i.e. it's
// non-recursive).
//
// # Watching files
//
// Watching individual files (rather than directories) is generally not
// recommended as many programs (especially editors) update files atomically:
// they write to a temporary file which is then moved to the destination,
// overwriting the original (or some variant thereof). The watcher on the
// original file is now lost, as that no longer exists.
//
// The upshot of this is that a power failure or crash won't leave a
// half-written file.
//
// Watch the parent directory and use Event.Name to filter out files you're not
// interested in. There is an example of this in cmd/fsnotify/file.go.
func (w *Watcher) Add(path string) error { return w.b.Add(path) }
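// Illustrative sketch, not part of the vendored file: watching a single file
// robustly by watching its parent directory and filtering on Event.Name, as
// recommended above (upstream ships a fuller version in cmd/fsnotify/file.go):
//
//	target := "/etc/app.conf" // hypothetical path
//	if err := w.Add(filepath.Dir(target)); err != nil {
//		log.Fatal(err)
//	}
//	for ev := range w.Events {
//		if ev.Name == target && ev.Has(fsnotify.Write) {
//			log.Println("config changed")
//		}
//	}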
// AddWith is like [Watcher.Add], but allows adding options. When using Add()
// the defaults described below are used.
//
// Possible options are:
//
// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on
// other platforms. The default is 64K (65536 bytes).
func (w *Watcher) AddWith(path string, opts ...addOpt) error { return w.b.AddWith(path, opts...) }
// Remove stops monitoring the path for changes.
//
// Directories are always removed non-recursively. For example, if you added
// /tmp/dir and /tmp/dir/subdir then you will need to remove both.
//
// Removing a path that has not yet been added returns [ErrNonExistentWatch].
//
// Returns nil if [Watcher.Close] was called.
func (w *Watcher) Remove(path string) error { return w.b.Remove(path) }
// Close removes all watches and closes the Events channel.
func (w *Watcher) Close() error { return w.b.Close() }
// WatchList returns all paths explicitly added with [Watcher.Add] that have
// not yet been removed.
//
// The order is undefined, and may differ per call. Returns nil if
// [Watcher.Close] was called.
func (w *Watcher) WatchList() []string { return w.b.WatchList() }
// Supports reports if all the listed operations are supported by this platform.
//
// Create, Write, Remove, Rename, and Chmod are always supported. It can only
// return false for an Op starting with Unportable.
func (w *Watcher) xSupports(op Op) bool { return w.b.xSupports(op) }
func (o Op) String() string {
var b strings.Builder
if o.Has(Create) {
b.WriteString("|CREATE")
}
if o.Has(Remove) {
b.WriteString("|REMOVE")
}
if o.Has(Write) {
b.WriteString("|WRITE")
}
if o.Has(xUnportableOpen) {
b.WriteString("|OPEN")
}
if o.Has(xUnportableRead) {
b.WriteString("|READ")
}
if o.Has(xUnportableCloseWrite) {
b.WriteString("|CLOSE_WRITE")
}
if o.Has(xUnportableCloseRead) {
b.WriteString("|CLOSE_READ")
}
if o.Has(Rename) {
b.WriteString("|RENAME")
}
if o.Has(Chmod) {
b.WriteString("|CHMOD")
}
if b.Len() == 0 {
return "[no events]"
}
return b.String()[1:]
}
// Has reports if this operation has the given operation.
func (o Op) Has(h Op) bool { return o&h != 0 }
// Has reports if this event has the given operation.
func (e Event) Has(op Op) bool { return e.Op.Has(op) }
// String returns a string representation of the event with their path.
func (e Event) String() string {
if e.renamedFrom != "" {
return fmt.Sprintf("%-13s %q ← %q", e.Op.String(), e.Name, e.renamedFrom)
}
return fmt.Sprintf("%-13s %q", e.Op.String(), e.Name)
}
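// Illustrative sketch, not part of the vendored file: Op is a bitmask and an
// event may carry several operations at once, so check with Has rather than
// comparing with ==:
//
//	ev := Event{Name: "/tmp/x", Op: Write | Chmod}
//	fmt.Println(ev.Op == Write) // false: the Chmod bit is also set
//	fmt.Println(ev.Has(Write))  // true
//	fmt.Println(ev)             // prints: WRITE|CHMOD   "/tmp/x"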
type (
backend interface {
Add(string) error
AddWith(string, ...addOpt) error
Remove(string) error
WatchList() []string
Close() error
xSupports(Op) bool
}
addOpt func(opt *withOpts)
withOpts struct {
bufsize int
op Op
noFollow bool
sendCreate bool
}
)
var debug = func() bool {
// Check for exactly "1" (rather than mere existence) so we can add
// options/flags in the future. I don't know if we ever want that, but it's
// nice to leave the option open.
return os.Getenv("FSNOTIFY_DEBUG") == "1"
}()
var defaultOpts = withOpts{
bufsize: 65536, // 64K
op: Create | Write | Remove | Rename | Chmod,
}
func getOptions(opts ...addOpt) withOpts {
with := defaultOpts
for _, o := range opts {
if o != nil {
o(&with)
}
}
return with
}
// WithBufferSize sets the [ReadDirectoryChangesW] buffer size.
//
// This only has effect on Windows systems, and is a no-op for other backends.
//
// The default value is 64K (65536 bytes) which is the highest value that works
// on all filesystems and should be enough for most applications, but if you
// have a large burst of events it may not be enough. You can increase it if
// you're hitting "queue or buffer overflow" errors ([ErrEventOverflow]).
//
// [ReadDirectoryChangesW]: https://learn.microsoft.com/en-gb/windows/win32/api/winbase/nf-winbase-readdirectorychangesw
func WithBufferSize(bytes int) addOpt {
return func(opt *withOpts) { opt.bufsize = bytes }
}
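// Illustrative sketch, not part of the vendored file: raising the
// ReadDirectoryChangesW buffer for an event-heavy directory. This only
// matters on Windows and is a no-op on the other backends:
//
//	err := w.AddWith(`C:\build\out`, fsnotify.WithBufferSize(256<<10)) // 256K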
// WithOps sets which operations to listen for. The default is [Create],
// [Write], [Remove], [Rename], and [Chmod].
//
// Excluding operations you're not interested in can save quite a bit of CPU
// time; in some use cases there may be hundreds of thousands of useless Write
// or Chmod operations per second.
//
// This can also be used to add unportable operations not supported by all
// platforms; unportable operations all start with "Unportable":
// [UnportableOpen], [UnportableRead], [UnportableCloseWrite], and
// [UnportableCloseRead].
//
// AddWith returns an error when using an unportable operation that's not
// supported. Use [Watcher.Support] to check for support.
func withOps(op Op) addOpt {
return func(opt *withOpts) { opt.op = op }
}
// WithNoFollow disables following symlinks, so the symlinks themselves are
// watched.
func withNoFollow() addOpt {
return func(opt *withOpts) { opt.noFollow = true }
}
// "Internal" option for recursive watches on inotify.
func withCreate() addOpt {
return func(opt *withOpts) { opt.sendCreate = true }
}
var enableRecurse = false
// Check if this path is recursive (ends with "/..." or "\..."), and return the
// path with the /... stripped.
func recursivePath(path string) (string, bool) {
path = filepath.Clean(path)
if !enableRecurse { // Only enabled in tests for now.
return path, false
}
if filepath.Base(path) == "..." {
return filepath.Dir(path), true
}
return path, false
}
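// Illustrative sketch, not part of the vendored file: the "/..." convention
// handled by recursivePath above (only honoured when enableRecurse is set,
// which is test-only for now):
//
//	recursivePath("/tmp/dir/...") // → "/tmp/dir", true
//	recursivePath("/tmp/dir")     // → "/tmp/dir", false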

39
vendor/github.com/fsnotify/fsnotify/internal/darwin.go generated vendored Normal file
View File

@@ -0,0 +1,39 @@
//go:build darwin
package internal
import (
"syscall"
"golang.org/x/sys/unix"
)
var (
ErrSyscallEACCES = syscall.EACCES
ErrUnixEACCES = unix.EACCES
)
var maxfiles uint64
func SetRlimit() {
// Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/
var l syscall.Rlimit
err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l)
if err == nil && l.Cur != l.Max {
l.Cur = l.Max
syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l)
}
maxfiles = l.Cur
if n, err := syscall.SysctlUint32("kern.maxfiles"); err == nil && uint64(n) < maxfiles {
maxfiles = uint64(n)
}
if n, err := syscall.SysctlUint32("kern.maxfilesperproc"); err == nil && uint64(n) < maxfiles {
maxfiles = uint64(n)
}
}
func Maxfiles() uint64 { return maxfiles }
func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) }
func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, dev) }

View File

@@ -0,0 +1,57 @@
package internal
import "golang.org/x/sys/unix"
var names = []struct {
n string
m uint32
}{
{"NOTE_ABSOLUTE", unix.NOTE_ABSOLUTE},
{"NOTE_ATTRIB", unix.NOTE_ATTRIB},
{"NOTE_BACKGROUND", unix.NOTE_BACKGROUND},
{"NOTE_CHILD", unix.NOTE_CHILD},
{"NOTE_CRITICAL", unix.NOTE_CRITICAL},
{"NOTE_DELETE", unix.NOTE_DELETE},
{"NOTE_EXEC", unix.NOTE_EXEC},
{"NOTE_EXIT", unix.NOTE_EXIT},
{"NOTE_EXITSTATUS", unix.NOTE_EXITSTATUS},
{"NOTE_EXIT_CSERROR", unix.NOTE_EXIT_CSERROR},
{"NOTE_EXIT_DECRYPTFAIL", unix.NOTE_EXIT_DECRYPTFAIL},
{"NOTE_EXIT_DETAIL", unix.NOTE_EXIT_DETAIL},
{"NOTE_EXIT_DETAIL_MASK", unix.NOTE_EXIT_DETAIL_MASK},
{"NOTE_EXIT_MEMORY", unix.NOTE_EXIT_MEMORY},
{"NOTE_EXIT_REPARENTED", unix.NOTE_EXIT_REPARENTED},
{"NOTE_EXTEND", unix.NOTE_EXTEND},
{"NOTE_FFAND", unix.NOTE_FFAND},
{"NOTE_FFCOPY", unix.NOTE_FFCOPY},
{"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK},
{"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK},
{"NOTE_FFNOP", unix.NOTE_FFNOP},
{"NOTE_FFOR", unix.NOTE_FFOR},
{"NOTE_FORK", unix.NOTE_FORK},
{"NOTE_FUNLOCK", unix.NOTE_FUNLOCK},
{"NOTE_LEEWAY", unix.NOTE_LEEWAY},
{"NOTE_LINK", unix.NOTE_LINK},
{"NOTE_LOWAT", unix.NOTE_LOWAT},
{"NOTE_MACHTIME", unix.NOTE_MACHTIME},
{"NOTE_MACH_CONTINUOUS_TIME", unix.NOTE_MACH_CONTINUOUS_TIME},
{"NOTE_NONE", unix.NOTE_NONE},
{"NOTE_NSECONDS", unix.NOTE_NSECONDS},
{"NOTE_OOB", unix.NOTE_OOB},
//{"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, -0x100000 (?!)
{"NOTE_PDATAMASK", unix.NOTE_PDATAMASK},
{"NOTE_REAP", unix.NOTE_REAP},
{"NOTE_RENAME", unix.NOTE_RENAME},
{"NOTE_REVOKE", unix.NOTE_REVOKE},
{"NOTE_SECONDS", unix.NOTE_SECONDS},
{"NOTE_SIGNAL", unix.NOTE_SIGNAL},
{"NOTE_TRACK", unix.NOTE_TRACK},
{"NOTE_TRACKERR", unix.NOTE_TRACKERR},
{"NOTE_TRIGGER", unix.NOTE_TRIGGER},
{"NOTE_USECONDS", unix.NOTE_USECONDS},
{"NOTE_VM_ERROR", unix.NOTE_VM_ERROR},
{"NOTE_VM_PRESSURE", unix.NOTE_VM_PRESSURE},
{"NOTE_VM_PRESSURE_SUDDEN_TERMINATE", unix.NOTE_VM_PRESSURE_SUDDEN_TERMINATE},
{"NOTE_VM_PRESSURE_TERMINATE", unix.NOTE_VM_PRESSURE_TERMINATE},
{"NOTE_WRITE", unix.NOTE_WRITE},
}

View File

@@ -0,0 +1,33 @@
package internal
import "golang.org/x/sys/unix"
var names = []struct {
n string
m uint32
}{
{"NOTE_ATTRIB", unix.NOTE_ATTRIB},
{"NOTE_CHILD", unix.NOTE_CHILD},
{"NOTE_DELETE", unix.NOTE_DELETE},
{"NOTE_EXEC", unix.NOTE_EXEC},
{"NOTE_EXIT", unix.NOTE_EXIT},
{"NOTE_EXTEND", unix.NOTE_EXTEND},
{"NOTE_FFAND", unix.NOTE_FFAND},
{"NOTE_FFCOPY", unix.NOTE_FFCOPY},
{"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK},
{"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK},
{"NOTE_FFNOP", unix.NOTE_FFNOP},
{"NOTE_FFOR", unix.NOTE_FFOR},
{"NOTE_FORK", unix.NOTE_FORK},
{"NOTE_LINK", unix.NOTE_LINK},
{"NOTE_LOWAT", unix.NOTE_LOWAT},
{"NOTE_OOB", unix.NOTE_OOB},
{"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK},
{"NOTE_PDATAMASK", unix.NOTE_PDATAMASK},
{"NOTE_RENAME", unix.NOTE_RENAME},
{"NOTE_REVOKE", unix.NOTE_REVOKE},
{"NOTE_TRACK", unix.NOTE_TRACK},
{"NOTE_TRACKERR", unix.NOTE_TRACKERR},
{"NOTE_TRIGGER", unix.NOTE_TRIGGER},
{"NOTE_WRITE", unix.NOTE_WRITE},
}

View File

@@ -0,0 +1,42 @@
package internal
import "golang.org/x/sys/unix"
var names = []struct {
n string
m uint32
}{
{"NOTE_ABSTIME", unix.NOTE_ABSTIME},
{"NOTE_ATTRIB", unix.NOTE_ATTRIB},
{"NOTE_CHILD", unix.NOTE_CHILD},
{"NOTE_CLOSE", unix.NOTE_CLOSE},
{"NOTE_CLOSE_WRITE", unix.NOTE_CLOSE_WRITE},
{"NOTE_DELETE", unix.NOTE_DELETE},
{"NOTE_EXEC", unix.NOTE_EXEC},
{"NOTE_EXIT", unix.NOTE_EXIT},
{"NOTE_EXTEND", unix.NOTE_EXTEND},
{"NOTE_FFAND", unix.NOTE_FFAND},
{"NOTE_FFCOPY", unix.NOTE_FFCOPY},
{"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK},
{"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK},
{"NOTE_FFNOP", unix.NOTE_FFNOP},
{"NOTE_FFOR", unix.NOTE_FFOR},
{"NOTE_FILE_POLL", unix.NOTE_FILE_POLL},
{"NOTE_FORK", unix.NOTE_FORK},
{"NOTE_LINK", unix.NOTE_LINK},
{"NOTE_LOWAT", unix.NOTE_LOWAT},
{"NOTE_MSECONDS", unix.NOTE_MSECONDS},
{"NOTE_NSECONDS", unix.NOTE_NSECONDS},
{"NOTE_OPEN", unix.NOTE_OPEN},
{"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK},
{"NOTE_PDATAMASK", unix.NOTE_PDATAMASK},
{"NOTE_READ", unix.NOTE_READ},
{"NOTE_RENAME", unix.NOTE_RENAME},
{"NOTE_REVOKE", unix.NOTE_REVOKE},
{"NOTE_SECONDS", unix.NOTE_SECONDS},
{"NOTE_TRACK", unix.NOTE_TRACK},
{"NOTE_TRACKERR", unix.NOTE_TRACKERR},
{"NOTE_TRIGGER", unix.NOTE_TRIGGER},
{"NOTE_USECONDS", unix.NOTE_USECONDS},
{"NOTE_WRITE", unix.NOTE_WRITE},
}

View File

@@ -0,0 +1,32 @@
//go:build freebsd || openbsd || netbsd || dragonfly || darwin
package internal
import (
"fmt"
"os"
"strings"
"time"
"golang.org/x/sys/unix"
)
func Debug(name string, kevent *unix.Kevent_t) {
mask := uint32(kevent.Fflags)
var (
l []string
unknown = mask
)
for _, n := range names {
if mask&n.m == n.m {
l = append(l, n.n)
unknown ^= n.m
}
}
if unknown > 0 {
l = append(l, fmt.Sprintf("0x%x", unknown))
}
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %10d:%-60s → %q\n",
time.Now().Format("15:04:05.000000000"), mask, strings.Join(l, " | "), name)
}

View File

@@ -0,0 +1,56 @@
package internal
import (
"fmt"
"os"
"strings"
"time"
"golang.org/x/sys/unix"
)
func Debug(name string, mask, cookie uint32) {
names := []struct {
n string
m uint32
}{
{"IN_ACCESS", unix.IN_ACCESS},
{"IN_ATTRIB", unix.IN_ATTRIB},
{"IN_CLOSE", unix.IN_CLOSE},
{"IN_CLOSE_NOWRITE", unix.IN_CLOSE_NOWRITE},
{"IN_CLOSE_WRITE", unix.IN_CLOSE_WRITE},
{"IN_CREATE", unix.IN_CREATE},
{"IN_DELETE", unix.IN_DELETE},
{"IN_DELETE_SELF", unix.IN_DELETE_SELF},
{"IN_IGNORED", unix.IN_IGNORED},
{"IN_ISDIR", unix.IN_ISDIR},
{"IN_MODIFY", unix.IN_MODIFY},
{"IN_MOVE", unix.IN_MOVE},
{"IN_MOVED_FROM", unix.IN_MOVED_FROM},
{"IN_MOVED_TO", unix.IN_MOVED_TO},
{"IN_MOVE_SELF", unix.IN_MOVE_SELF},
{"IN_OPEN", unix.IN_OPEN},
{"IN_Q_OVERFLOW", unix.IN_Q_OVERFLOW},
{"IN_UNMOUNT", unix.IN_UNMOUNT},
}
var (
l []string
unknown = mask
)
for _, n := range names {
if mask&n.m == n.m {
l = append(l, n.n)
unknown ^= n.m
}
}
if unknown > 0 {
l = append(l, fmt.Sprintf("0x%x", unknown))
}
var c string
if cookie > 0 {
c = fmt.Sprintf("(cookie: %d) ", cookie)
}
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %-30s → %s%q\n",
time.Now().Format("15:04:05.000000000"), strings.Join(l, "|"), c, name)
}

View File

@@ -0,0 +1,25 @@
package internal
import "golang.org/x/sys/unix"
var names = []struct {
n string
m uint32
}{
{"NOTE_ATTRIB", unix.NOTE_ATTRIB},
{"NOTE_CHILD", unix.NOTE_CHILD},
{"NOTE_DELETE", unix.NOTE_DELETE},
{"NOTE_EXEC", unix.NOTE_EXEC},
{"NOTE_EXIT", unix.NOTE_EXIT},
{"NOTE_EXTEND", unix.NOTE_EXTEND},
{"NOTE_FORK", unix.NOTE_FORK},
{"NOTE_LINK", unix.NOTE_LINK},
{"NOTE_LOWAT", unix.NOTE_LOWAT},
{"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK},
{"NOTE_PDATAMASK", unix.NOTE_PDATAMASK},
{"NOTE_RENAME", unix.NOTE_RENAME},
{"NOTE_REVOKE", unix.NOTE_REVOKE},
{"NOTE_TRACK", unix.NOTE_TRACK},
{"NOTE_TRACKERR", unix.NOTE_TRACKERR},
{"NOTE_WRITE", unix.NOTE_WRITE},
}

View File

@@ -0,0 +1,28 @@
package internal
import "golang.org/x/sys/unix"
var names = []struct {
n string
m uint32
}{
{"NOTE_ATTRIB", unix.NOTE_ATTRIB},
// {"NOTE_CHANGE", unix.NOTE_CHANGE}, // Not on 386?
{"NOTE_CHILD", unix.NOTE_CHILD},
{"NOTE_DELETE", unix.NOTE_DELETE},
{"NOTE_EOF", unix.NOTE_EOF},
{"NOTE_EXEC", unix.NOTE_EXEC},
{"NOTE_EXIT", unix.NOTE_EXIT},
{"NOTE_EXTEND", unix.NOTE_EXTEND},
{"NOTE_FORK", unix.NOTE_FORK},
{"NOTE_LINK", unix.NOTE_LINK},
{"NOTE_LOWAT", unix.NOTE_LOWAT},
{"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK},
{"NOTE_PDATAMASK", unix.NOTE_PDATAMASK},
{"NOTE_RENAME", unix.NOTE_RENAME},
{"NOTE_REVOKE", unix.NOTE_REVOKE},
{"NOTE_TRACK", unix.NOTE_TRACK},
{"NOTE_TRACKERR", unix.NOTE_TRACKERR},
{"NOTE_TRUNCATE", unix.NOTE_TRUNCATE},
{"NOTE_WRITE", unix.NOTE_WRITE},
}

View File

@@ -0,0 +1,45 @@
package internal
import (
"fmt"
"os"
"strings"
"time"
"golang.org/x/sys/unix"
)
func Debug(name string, mask int32) {
names := []struct {
n string
m int32
}{
{"FILE_ACCESS", unix.FILE_ACCESS},
{"FILE_MODIFIED", unix.FILE_MODIFIED},
{"FILE_ATTRIB", unix.FILE_ATTRIB},
{"FILE_TRUNC", unix.FILE_TRUNC},
{"FILE_NOFOLLOW", unix.FILE_NOFOLLOW},
{"FILE_DELETE", unix.FILE_DELETE},
{"FILE_RENAME_TO", unix.FILE_RENAME_TO},
{"FILE_RENAME_FROM", unix.FILE_RENAME_FROM},
{"UNMOUNTED", unix.UNMOUNTED},
{"MOUNTEDOVER", unix.MOUNTEDOVER},
{"FILE_EXCEPTION", unix.FILE_EXCEPTION},
}
var (
l []string
unknown = mask
)
for _, n := range names {
if mask&n.m == n.m {
l = append(l, n.n)
unknown ^= n.m
}
}
if unknown > 0 {
l = append(l, fmt.Sprintf("0x%x", unknown))
}
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %10d:%-30s → %q\n",
time.Now().Format("15:04:05.000000000"), mask, strings.Join(l, " | "), name)
}

View File

@@ -0,0 +1,40 @@
package internal
import (
"fmt"
"os"
"path/filepath"
"strings"
"time"
"golang.org/x/sys/windows"
)
func Debug(name string, mask uint32) {
names := []struct {
n string
m uint32
}{
{"FILE_ACTION_ADDED", windows.FILE_ACTION_ADDED},
{"FILE_ACTION_REMOVED", windows.FILE_ACTION_REMOVED},
{"FILE_ACTION_MODIFIED", windows.FILE_ACTION_MODIFIED},
{"FILE_ACTION_RENAMED_OLD_NAME", windows.FILE_ACTION_RENAMED_OLD_NAME},
{"FILE_ACTION_RENAMED_NEW_NAME", windows.FILE_ACTION_RENAMED_NEW_NAME},
}
var (
l []string
unknown = mask
)
for _, n := range names {
if mask&n.m == n.m {
l = append(l, n.n)
unknown ^= n.m
}
}
if unknown > 0 {
l = append(l, fmt.Sprintf("0x%x", unknown))
}
fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %-65s → %q\n",
time.Now().Format("15:04:05.000000000"), strings.Join(l, " | "), filepath.ToSlash(name))
}

View File

@@ -0,0 +1,31 @@
//go:build freebsd
package internal
import (
"syscall"
"golang.org/x/sys/unix"
)
var (
ErrSyscallEACCES = syscall.EACCES
ErrUnixEACCES = unix.EACCES
)
var maxfiles uint64
func SetRlimit() {
// Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/
var l syscall.Rlimit
err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l)
if err == nil && l.Cur != l.Max {
l.Cur = l.Max
syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l)
}
maxfiles = uint64(l.Cur)
}
func Maxfiles() uint64 { return maxfiles }
func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) }
func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, uint64(dev)) }

View File

@@ -0,0 +1,2 @@
// Package internal contains some helpers.
package internal

31
vendor/github.com/fsnotify/fsnotify/internal/unix.go generated vendored Normal file
View File

@@ -0,0 +1,31 @@
//go:build !windows && !darwin && !freebsd && !plan9
package internal
import (
"syscall"
"golang.org/x/sys/unix"
)
var (
ErrSyscallEACCES = syscall.EACCES
ErrUnixEACCES = unix.EACCES
)
var maxfiles uint64
func SetRlimit() {
// Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/
var l syscall.Rlimit
err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l)
if err == nil && l.Cur != l.Max {
l.Cur = l.Max
syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l)
}
maxfiles = uint64(l.Cur)
}
func Maxfiles() uint64 { return maxfiles }
func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) }
func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, dev) }

View File

@@ -0,0 +1,7 @@
//go:build !windows
package internal
func HasPrivilegesForSymlink() bool {
return true
}

View File

@@ -0,0 +1,41 @@
//go:build windows
package internal
import (
"errors"
"golang.org/x/sys/windows"
)
// Just a dummy.
var (
ErrSyscallEACCES = errors.New("dummy")
ErrUnixEACCES = errors.New("dummy")
)
func SetRlimit() {}
func Maxfiles() uint64 { return 1<<64 - 1 }
func Mkfifo(path string, mode uint32) error { return errors.New("no FIFOs on Windows") }
func Mknod(path string, mode uint32, dev int) error { return errors.New("no device nodes on Windows") }
func HasPrivilegesForSymlink() bool {
var sid *windows.SID
err := windows.AllocateAndInitializeSid(
&windows.SECURITY_NT_AUTHORITY,
2,
windows.SECURITY_BUILTIN_DOMAIN_RID,
windows.DOMAIN_ALIAS_RID_ADMINS,
0, 0, 0, 0, 0, 0,
&sid)
if err != nil {
return false
}
defer windows.FreeSid(sid)
token := windows.Token(0)
member, err := token.IsMember(sid)
if err != nil {
return false
}
return member || token.IsElevated()
}

64
vendor/github.com/fsnotify/fsnotify/shared.go generated vendored Normal file
View File

@@ -0,0 +1,64 @@
package fsnotify
import "sync"
type shared struct {
Events chan Event
Errors chan error
done chan struct{}
mu sync.Mutex
}
func newShared(ev chan Event, errs chan error) *shared {
return &shared{
Events: ev,
Errors: errs,
done: make(chan struct{}),
}
}
// Returns true if the event was sent, or false if watcher is closed.
func (w *shared) sendEvent(e Event) bool {
if e.Op == 0 {
return true
}
select {
case <-w.done:
return false
case w.Events <- e:
return true
}
}
// Returns true if the error was sent, or false if watcher is closed.
func (w *shared) sendError(err error) bool {
if err == nil {
return true
}
select {
case <-w.done:
return false
case w.Errors <- err:
return true
}
}
func (w *shared) isClosed() bool {
select {
case <-w.done:
return true
default:
return false
}
}
// Mark as closed; returns true if it was already closed.
func (w *shared) close() bool {
w.mu.Lock()
defer w.mu.Unlock()
if w.isClosed() {
return true
}
close(w.done)
return false
}

3
vendor/github.com/fsnotify/fsnotify/staticcheck.conf generated vendored Normal file
View File

@@ -0,0 +1,3 @@
checks = ['all',
'-U1000', # Don't complain about unused functions.
]

7
vendor/github.com/fsnotify/fsnotify/system_bsd.go generated vendored Normal file
View File

@@ -0,0 +1,7 @@
//go:build freebsd || openbsd || netbsd || dragonfly
package fsnotify
import "golang.org/x/sys/unix"
const openMode = unix.O_NONBLOCK | unix.O_RDONLY | unix.O_CLOEXEC

8
vendor/github.com/fsnotify/fsnotify/system_darwin.go generated vendored Normal file
View File

@@ -0,0 +1,8 @@
//go:build darwin
package fsnotify
import "golang.org/x/sys/unix"
// note: this constant is not defined on BSD
const openMode = unix.O_EVTONLY | unix.O_CLOEXEC

View File

@@ -0,0 +1,18 @@
root = true
[*]
charset = utf-8
end_of_line = lf
indent_size = 4
indent_style = space
insert_final_newline = true
trim_trailing_whitespace = true
[*.go]
indent_style = tab
[{Makefile,*.mk}]
indent_style = tab
[*.nix]
indent_size = 2

4
vendor/github.com/go-viper/mapstructure/v2/.envrc generated vendored Normal file
View File

@@ -0,0 +1,4 @@
if ! has nix_direnv_version || ! nix_direnv_version 3.0.4; then
source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/3.0.4/direnvrc" "sha256-DzlYZ33mWF/Gs8DDeyjr8mnVmQGx7ASYqA5WlxwvBG4="
fi
use flake . --impure

View File

@@ -0,0 +1,6 @@
/.devenv/
/.direnv/
/.pre-commit-config.yaml
/bin/
/build/
/var/

View File

@@ -0,0 +1,23 @@
run:
timeout: 5m
linters-settings:
gci:
sections:
- standard
- default
- prefix(github.com/go-viper/mapstructure)
golint:
min-confidence: 0
goimports:
local-prefixes: github.com/go-viper/mapstructure
linters:
disable-all: true
enable:
- gci
- gofmt
- gofumpt
- goimports
- staticcheck
# - stylecheck

104
vendor/github.com/go-viper/mapstructure/v2/CHANGELOG.md generated vendored Normal file
View File

@@ -0,0 +1,104 @@
> [!WARNING]
> As of v2 of this library, change log can be found in GitHub releases.
## 1.5.1
* Wrap errors so they're compatible with `errors.Is` and `errors.As` [GH-282]
* Fix map of slices not decoding properly in certain cases. [GH-266]
## 1.5.0
* New option `IgnoreUntaggedFields` to ignore decoding to any fields
without `mapstructure` (or the configured tag name) set [GH-277]
* New option `ErrorUnset` which makes it an error if any fields
in a target struct are not set by the decoding process. [GH-225]
* New function `OrComposeDecodeHookFunc` to help compose decode hooks. [GH-240]
* Decoding to slice from array no longer crashes [GH-265]
* Decode nested struct pointers to map [GH-271]
* Fix issue where `,squash` was ignored if `Squash` option was set. [GH-280]
* Fix issue where fields with `,omitempty` would sometimes decode
into a map with an empty string key [GH-281]
## 1.4.3
* Fix cases where `json.Number` didn't decode properly [GH-261]
## 1.4.2
* Custom name matchers to support any sort of casing, formatting, etc. for
field names. [GH-250]
* Fix possible panic in ComposeDecodeHookFunc [GH-251]
## 1.4.1
* Fix regression where `*time.Time` value would be set to empty and not be sent
to decode hooks properly [GH-232]
## 1.4.0
* A new decode hook type `DecodeHookFuncValue` has been added that has
access to the full values. [GH-183]
* Squash is now supported with embedded fields that are struct pointers [GH-205]
* Empty strings will convert to 0 for all numeric types when weakly decoding [GH-206]
## 1.3.3
* Decoding maps from maps creates a settable value for decode hooks [GH-203]
## 1.3.2
* Decode into interface type with a struct value is supported [GH-187]
## 1.3.1
* Squash should only squash embedded structs. [GH-194]
## 1.3.0
* Added `",omitempty"` support. This will ignore zero values in the source
structure when encoding. [GH-145]
## 1.2.3
* Fix duplicate entries in Keys list with pointer values. [GH-185]
## 1.2.2
* Do not add unsettable (unexported) values to the unused metadata key
or "remain" value. [GH-150]
## 1.2.1
* Go modules checksum mismatch fix
## 1.2.0
* Added support to capture unused values in a field using the `",remain"` value
in the mapstructure tag. There is an example to showcase usage.
* Added `DecoderConfig` option to always squash embedded structs
* `json.Number` can decode into `uint` types
* Empty slices are preserved and not replaced with nil slices
* Fix panic that can occur in when decoding a map into a nil slice of structs
* Improved package documentation for godoc
## 1.1.2
* Fix error when decode hook decodes interface implementation into interface
type. [GH-140]
## 1.1.1
* Fix panic that can happen in `decodePtr`
## 1.1.0
* Added `StringToIPHookFunc` to convert `string` to `net.IP` and `net.IPNet` [GH-133]
* Support struct to struct decoding [GH-137]
* If source map value is nil, then destination map value is nil (instead of empty)
* If source slice value is nil, then destination slice value is nil (instead of empty)
* If source pointer is nil, then destination pointer is set to nil (instead of
allocated zero value of type)
## 1.0.0
* Initial tagged stable release.

21
vendor/github.com/go-viper/mapstructure/v2/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2013 Mitchell Hashimoto
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

80
vendor/github.com/go-viper/mapstructure/v2/README.md generated vendored Normal file
View File

@@ -0,0 +1,80 @@
# mapstructure
[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/go-viper/mapstructure/ci.yaml?branch=main&style=flat-square)](https://github.com/go-viper/mapstructure/actions?query=workflow%3ACI)
[![go.dev reference](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/mod/github.com/go-viper/mapstructure/v2)
![Go Version](https://img.shields.io/badge/go%20version-%3E=1.18-61CFDD.svg?style=flat-square)
mapstructure is a Go library for decoding generic map values to structures
and vice versa, while providing helpful error handling.
This library is most useful when decoding values from some data stream (JSON,
Gob, etc.) where you don't _quite_ know the structure of the underlying data
until you read a part of it. You can therefore read a `map[string]interface{}`
and use this library to decode it into the proper underlying native Go
structure.
## Installation
```shell
go get github.com/go-viper/mapstructure/v2
```
## Migrating from `github.com/mitchellh/mapstructure`
[@mitchellh](https://github.com/mitchellh) announced his intent to archive some of his unmaintained projects (see [here](https://gist.github.com/mitchellh/90029601268e59a29e64e55bab1c5bdc) and [here](https://github.com/mitchellh/mapstructure/issues/349)). This repository achieved the "blessed fork" status.
You can migrate to this package by changing your import paths in your Go files to `github.com/go-viper/mapstructure/v2`.
The API is the same, so you don't need to change anything else.
Here is a script that can help you with the migration:
```shell
sed -i 's/github.com\/mitchellh\/mapstructure/github.com\/go-viper\/mapstructure\/v2/g' $(find . -type f -name '*.go')
```
If you need more time to migrate your code, that is absolutely fine.
Some of the latest fixes are backported to the v1 release branch of this package, so you can use the Go modules `replace` directive in your `go.mod` until you are ready to migrate:
```
replace github.com/mitchellh/mapstructure => github.com/go-viper/mapstructure v1.6.0
```
## Usage & Example
For usage and examples see the [documentation](https://pkg.go.dev/mod/github.com/go-viper/mapstructure/v2).
The `Decode` function has examples associated with it there.
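For a flavor of the API, here is a minimal sketch that wires two of this package's decode hooks into a `Decoder` (the `Config` struct, its fields, and the input map are invented for illustration):

```go
package main

import (
	"fmt"
	"time"

	"github.com/go-viper/mapstructure/v2"
)

// Config is a hypothetical target type for this sketch.
type Config struct {
	Timeout time.Duration
	Tags    []string
}

func main() {
	// Generic input, e.g. values read from a config file.
	input := map[string]interface{}{
		"timeout": "1m30s",
		"tags":    "a,b,c",
	}

	var cfg Config
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		DecodeHook: mapstructure.ComposeDecodeHookFunc(
			mapstructure.StringToTimeDurationHookFunc(), // "1m30s" -> time.Duration
			mapstructure.StringToSliceHookFunc(","),     // "a,b,c" -> []string
		),
		Result: &cfg,
	})
	if err != nil {
		panic(err)
	}
	if err := dec.Decode(input); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg) // {Timeout:1m30s Tags:[a b c]}
}
```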
## But Why?!
Go offers fantastic standard libraries for decoding formats such as JSON.
The standard method is to have a struct pre-created, and populate that struct
from the bytes of the encoded format. This is great, but the problem is if
you have configuration or an encoding that changes slightly depending on
specific fields. For example, consider this JSON:
```json
{
"type": "person",
"name": "Mitchell"
}
```
Perhaps we can't populate a specific structure without first reading
the "type" field from the JSON. We could always do two passes over the
decoding of the JSON (reading the "type" first, and the rest later).
However, it is much simpler to just decode this into a `map[string]interface{}`
structure, read the "type" key, then use something like this library
to decode it into the proper structure.
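As a concrete sketch of that two-step flow (the `Person` type here is invented for illustration):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-viper/mapstructure/v2"
)

// Person is a hypothetical concrete type for this sketch.
type Person struct {
	Type string
	Name string
}

func main() {
	data := []byte(`{"type": "person", "name": "Mitchell"}`)

	// First pass: decode into a generic map.
	var raw map[string]interface{}
	if err := json.Unmarshal(data, &raw); err != nil {
		panic(err)
	}

	// Dispatch on the "type" key, then decode into the matching struct.
	switch raw["type"] {
	case "person":
		var p Person
		if err := mapstructure.Decode(raw, &p); err != nil {
			panic(err)
		}
		fmt.Printf("%+v\n", p) // {Type:person Name:Mitchell}
	}
}
```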
## Credits
Mapstructure was originally created by [@mitchellh](https://github.com/mitchellh).
This is a maintained fork of the original library.
Read more about the reasons for the fork [here](https://github.com/mitchellh/mapstructure/issues/349).
## License
The project is licensed under the [MIT License](LICENSE).

View File

@@ -0,0 +1,630 @@
package mapstructure
import (
"encoding"
"errors"
"fmt"
"net"
"net/netip"
"net/url"
"reflect"
"strconv"
"strings"
"time"
)
// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns
// it into the proper DecodeHookFunc type, such as DecodeHookFuncType.
func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc {
// Create variables here so we can reference them with the reflect pkg
var f1 DecodeHookFuncType
var f2 DecodeHookFuncKind
var f3 DecodeHookFuncValue
// Fill in the variables into this interface and the rest is done
// automatically using the reflect package.
potential := []interface{}{f1, f2, f3}
v := reflect.ValueOf(h)
vt := v.Type()
for _, raw := range potential {
pt := reflect.ValueOf(raw).Type()
if vt.ConvertibleTo(pt) {
return v.Convert(pt).Interface()
}
}
return nil
}
// cachedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns
// it into a closure to be used directly
// if the type fails to convert we return a closure always erroring to keep the previous behaviour
func cachedDecodeHook(raw DecodeHookFunc) func(from reflect.Value, to reflect.Value) (interface{}, error) {
switch f := typedDecodeHook(raw).(type) {
case DecodeHookFuncType:
return func(from reflect.Value, to reflect.Value) (interface{}, error) {
return f(from.Type(), to.Type(), from.Interface())
}
case DecodeHookFuncKind:
return func(from reflect.Value, to reflect.Value) (interface{}, error) {
return f(from.Kind(), to.Kind(), from.Interface())
}
case DecodeHookFuncValue:
return func(from reflect.Value, to reflect.Value) (interface{}, error) {
return f(from, to)
}
default:
return func(from reflect.Value, to reflect.Value) (interface{}, error) {
return nil, errors.New("invalid decode hook signature")
}
}
}
// DecodeHookExec executes the given decode hook. This should be used
// since it'll naturally degrade to the older backwards compatible DecodeHookFunc
// that took reflect.Kind instead of reflect.Type.
func DecodeHookExec(
raw DecodeHookFunc,
from reflect.Value, to reflect.Value,
) (interface{}, error) {
switch f := typedDecodeHook(raw).(type) {
case DecodeHookFuncType:
return f(from.Type(), to.Type(), from.Interface())
case DecodeHookFuncKind:
return f(from.Kind(), to.Kind(), from.Interface())
case DecodeHookFuncValue:
return f(from, to)
default:
return nil, errors.New("invalid decode hook signature")
}
}
// ComposeDecodeHookFunc creates a single DecodeHookFunc that
// automatically composes multiple DecodeHookFuncs.
//
// The composed funcs are called in order, with the result of the
// previous transformation.
func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc {
cached := make([]func(from reflect.Value, to reflect.Value) (interface{}, error), 0, len(fs))
for _, f := range fs {
cached = append(cached, cachedDecodeHook(f))
}
return func(f reflect.Value, t reflect.Value) (interface{}, error) {
var err error
data := f.Interface()
newFrom := f
for _, c := range cached {
data, err = c(newFrom, t)
if err != nil {
return nil, err
}
newFrom = reflect.ValueOf(data)
}
return data, nil
}
}
// OrComposeDecodeHookFunc executes all input hook functions until one of them returns no error. In that case its value is returned.
// If all hooks return an error, OrComposeDecodeHookFunc returns an error concatenating all error messages.
func OrComposeDecodeHookFunc(ff ...DecodeHookFunc) DecodeHookFunc {
cached := make([]func(from reflect.Value, to reflect.Value) (interface{}, error), 0, len(ff))
for _, f := range ff {
cached = append(cached, cachedDecodeHook(f))
}
return func(a, b reflect.Value) (interface{}, error) {
var allErrs string
var out interface{}
var err error
for _, c := range cached {
out, err = c(a, b)
if err != nil {
allErrs += err.Error() + "\n"
continue
}
return out, nil
}
return nil, errors.New(allErrs)
}
}
// StringToSliceHookFunc returns a DecodeHookFunc that converts
// string to []string by splitting on the given sep.
func StringToSliceHookFunc(sep string) DecodeHookFunc {
return func(
f reflect.Type,
t reflect.Type,
data interface{},
) (interface{}, error) {
if f.Kind() != reflect.String {
return data, nil
}
if t != reflect.SliceOf(f) {
return data, nil
}
raw := data.(string)
if raw == "" {
return []string{}, nil
}
return strings.Split(raw, sep), nil
}
}
// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts
// strings to time.Duration.
func StringToTimeDurationHookFunc() DecodeHookFunc {
return func(
f reflect.Type,
t reflect.Type,
data interface{},
) (interface{}, error) {
if f.Kind() != reflect.String {
return data, nil
}
if t != reflect.TypeOf(time.Duration(5)) {
return data, nil
}
// Convert it by parsing
return time.ParseDuration(data.(string))
}
}
// StringToURLHookFunc returns a DecodeHookFunc that converts
// strings to *url.URL.
func StringToURLHookFunc() DecodeHookFunc {
return func(
f reflect.Type,
t reflect.Type,
data interface{},
) (interface{}, error) {
if f.Kind() != reflect.String {
return data, nil
}
if t != reflect.TypeOf(&url.URL{}) {
return data, nil
}
// Convert it by parsing
return url.Parse(data.(string))
}
}
// StringToIPHookFunc returns a DecodeHookFunc that converts
// strings to net.IP
func StringToIPHookFunc() DecodeHookFunc {
return func(
f reflect.Type,
t reflect.Type,
data interface{},
) (interface{}, error) {
if f.Kind() != reflect.String {
return data, nil
}
if t != reflect.TypeOf(net.IP{}) {
return data, nil
}
// Convert it by parsing
ip := net.ParseIP(data.(string))
if ip == nil {
return net.IP{}, fmt.Errorf("failed parsing ip %v", data)
}
return ip, nil
}
}
// StringToIPNetHookFunc returns a DecodeHookFunc that converts
// strings to net.IPNet
func StringToIPNetHookFunc() DecodeHookFunc {
return func(
f reflect.Type,
t reflect.Type,
data interface{},
) (interface{}, error) {
if f.Kind() != reflect.String {
return data, nil
}
if t != reflect.TypeOf(net.IPNet{}) {
return data, nil
}
// Convert it by parsing
_, net, err := net.ParseCIDR(data.(string))
return net, err
}
}
// StringToTimeHookFunc returns a DecodeHookFunc that converts
// strings to time.Time.
func StringToTimeHookFunc(layout string) DecodeHookFunc {
return func(
f reflect.Type,
t reflect.Type,
data interface{},
) (interface{}, error) {
if f.Kind() != reflect.String {
return data, nil
}
if t != reflect.TypeOf(time.Time{}) {
return data, nil
}
// Convert it by parsing
return time.Parse(layout, data.(string))
}
}
// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to
// the decoder.
//
// Note that this is significantly different from the WeaklyTypedInput option
// of the DecoderConfig.
func WeaklyTypedHook(
f reflect.Kind,
t reflect.Kind,
data interface{},
) (interface{}, error) {
dataVal := reflect.ValueOf(data)
switch t {
case reflect.String:
switch f {
case reflect.Bool:
if dataVal.Bool() {
return "1", nil
}
return "0", nil
case reflect.Float32:
return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil
case reflect.Int:
return strconv.FormatInt(dataVal.Int(), 10), nil
case reflect.Slice:
dataType := dataVal.Type()
elemKind := dataType.Elem().Kind()
if elemKind == reflect.Uint8 {
return string(dataVal.Interface().([]uint8)), nil
}
case reflect.Uint:
return strconv.FormatUint(dataVal.Uint(), 10), nil
}
}
return data, nil
}
func RecursiveStructToMapHookFunc() DecodeHookFunc {
return func(f reflect.Value, t reflect.Value) (interface{}, error) {
if f.Kind() != reflect.Struct {
return f.Interface(), nil
}
var i interface{} = struct{}{}
if t.Type() != reflect.TypeOf(&i).Elem() {
return f.Interface(), nil
}
m := make(map[string]interface{})
t.Set(reflect.ValueOf(m))
return f.Interface(), nil
}
}
// TextUnmarshallerHookFunc returns a DecodeHookFunc that applies
// strings to the UnmarshalText function, when the target type
// implements the encoding.TextUnmarshaler interface
func TextUnmarshallerHookFunc() DecodeHookFuncType {
return func(
f reflect.Type,
t reflect.Type,
data interface{},
) (interface{}, error) {
if f.Kind() != reflect.String {
return data, nil
}
result := reflect.New(t).Interface()
unmarshaller, ok := result.(encoding.TextUnmarshaler)
if !ok {
return data, nil
}
str, ok := data.(string)
if !ok {
str = reflect.Indirect(reflect.ValueOf(&data)).Elem().String()
}
if err := unmarshaller.UnmarshalText([]byte(str)); err != nil {
return nil, err
}
return result, nil
}
}
// StringToNetIPAddrHookFunc returns a DecodeHookFunc that converts
// strings to netip.Addr.
func StringToNetIPAddrHookFunc() DecodeHookFunc {
return func(
f reflect.Type,
t reflect.Type,
data interface{},
) (interface{}, error) {
if f.Kind() != reflect.String {
return data, nil
}
if t != reflect.TypeOf(netip.Addr{}) {
return data, nil
}
// Convert it by parsing
return netip.ParseAddr(data.(string))
}
}
// StringToNetIPAddrPortHookFunc returns a DecodeHookFunc that converts
// strings to netip.AddrPort.
func StringToNetIPAddrPortHookFunc() DecodeHookFunc {
return func(
f reflect.Type,
t reflect.Type,
data interface{},
) (interface{}, error) {
if f.Kind() != reflect.String {
return data, nil
}
if t != reflect.TypeOf(netip.AddrPort{}) {
return data, nil
}
// Convert it by parsing
return netip.ParseAddrPort(data.(string))
}
}
// StringToBasicTypeHookFunc returns a DecodeHookFunc that converts
// strings to basic types.
// int8, uint8, int16, uint16, int32, uint32, int64, uint64, int, uint, float32, float64, bool, byte, rune, complex64, complex128
func StringToBasicTypeHookFunc() DecodeHookFunc {
return ComposeDecodeHookFunc(
StringToInt8HookFunc(),
StringToUint8HookFunc(),
StringToInt16HookFunc(),
StringToUint16HookFunc(),
StringToInt32HookFunc(),
StringToUint32HookFunc(),
StringToInt64HookFunc(),
StringToUint64HookFunc(),
StringToIntHookFunc(),
StringToUintHookFunc(),
StringToFloat32HookFunc(),
StringToFloat64HookFunc(),
StringToBoolHookFunc(),
// byte and rune are aliases for uint8 and int32 respectively
// StringToByteHookFunc(),
// StringToRuneHookFunc(),
StringToComplex64HookFunc(),
StringToComplex128HookFunc(),
)
}
// StringToInt8HookFunc returns a DecodeHookFunc that converts
// strings to int8.
func StringToInt8HookFunc() DecodeHookFunc {
return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
if f.Kind() != reflect.String || t.Kind() != reflect.Int8 {
return data, nil
}
// Convert it by parsing
i64, err := strconv.ParseInt(data.(string), 0, 8)
return int8(i64), err
}
}
// StringToUint8HookFunc returns a DecodeHookFunc that converts
// strings to uint8.
func StringToUint8HookFunc() DecodeHookFunc {
return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
if f.Kind() != reflect.String || t.Kind() != reflect.Uint8 {
return data, nil
}
// Convert it by parsing
u64, err := strconv.ParseUint(data.(string), 0, 8)
return uint8(u64), err
}
}
// StringToInt16HookFunc returns a DecodeHookFunc that converts
// strings to int16.
func StringToInt16HookFunc() DecodeHookFunc {
return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
if f.Kind() != reflect.String || t.Kind() != reflect.Int16 {
return data, nil
}
// Convert it by parsing
i64, err := strconv.ParseInt(data.(string), 0, 16)
return int16(i64), err
}
}
// StringToUint16HookFunc returns a DecodeHookFunc that converts
// strings to uint16.
func StringToUint16HookFunc() DecodeHookFunc {
return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
if f.Kind() != reflect.String || t.Kind() != reflect.Uint16 {
return data, nil
}
// Convert it by parsing
u64, err := strconv.ParseUint(data.(string), 0, 16)
return uint16(u64), err
}
}
// StringToInt32HookFunc returns a DecodeHookFunc that converts
// strings to int32.
func StringToInt32HookFunc() DecodeHookFunc {
return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
if f.Kind() != reflect.String || t.Kind() != reflect.Int32 {
return data, nil
}
// Convert it by parsing
i64, err := strconv.ParseInt(data.(string), 0, 32)
return int32(i64), err
}
}
// StringToUint32HookFunc returns a DecodeHookFunc that converts
// strings to uint32.
func StringToUint32HookFunc() DecodeHookFunc {
return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
if f.Kind() != reflect.String || t.Kind() != reflect.Uint32 {
return data, nil
}
// Convert it by parsing
u64, err := strconv.ParseUint(data.(string), 0, 32)
return uint32(u64), err
}
}
// StringToInt64HookFunc returns a DecodeHookFunc that converts
// strings to int64.
func StringToInt64HookFunc() DecodeHookFunc {
return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
if f.Kind() != reflect.String || t.Kind() != reflect.Int64 {
return data, nil
}
// Convert it by parsing
return strconv.ParseInt(data.(string), 0, 64)
}
}
// StringToUint64HookFunc returns a DecodeHookFunc that converts
// strings to uint64.
func StringToUint64HookFunc() DecodeHookFunc {
return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
if f.Kind() != reflect.String || t.Kind() != reflect.Uint64 {
return data, nil
}
// Convert it by parsing
return strconv.ParseUint(data.(string), 0, 64)
}
}
// StringToIntHookFunc returns a DecodeHookFunc that converts
// strings to int.
func StringToIntHookFunc() DecodeHookFunc {
return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
if f.Kind() != reflect.String || t.Kind() != reflect.Int {
return data, nil
}
// Convert it by parsing
i64, err := strconv.ParseInt(data.(string), 0, 0)
return int(i64), err
}
}
// StringToUintHookFunc returns a DecodeHookFunc that converts
// strings to uint.
func StringToUintHookFunc() DecodeHookFunc {
return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
if f.Kind() != reflect.String || t.Kind() != reflect.Uint {
return data, nil
}
// Convert it by parsing
u64, err := strconv.ParseUint(data.(string), 0, 0)
return uint(u64), err
}
}
// StringToFloat32HookFunc returns a DecodeHookFunc that converts
// strings to float32.
func StringToFloat32HookFunc() DecodeHookFunc {
return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
if f.Kind() != reflect.String || t.Kind() != reflect.Float32 {
return data, nil
}
// Convert it by parsing
f64, err := strconv.ParseFloat(data.(string), 32)
return float32(f64), err
}
}
// StringToFloat64HookFunc returns a DecodeHookFunc that converts
// strings to float64.
func StringToFloat64HookFunc() DecodeHookFunc {
return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
if f.Kind() != reflect.String || t.Kind() != reflect.Float64 {
return data, nil
}
// Convert it by parsing
return strconv.ParseFloat(data.(string), 64)
}
}
// StringToBoolHookFunc returns a DecodeHookFunc that converts
// strings to bool.
func StringToBoolHookFunc() DecodeHookFunc {
return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
if f.Kind() != reflect.String || t.Kind() != reflect.Bool {
return data, nil
}
// Convert it by parsing
return strconv.ParseBool(data.(string))
}
}
// StringToByteHookFunc returns a DecodeHookFunc that converts
// strings to byte.
func StringToByteHookFunc() DecodeHookFunc {
return StringToUint8HookFunc()
}
// StringToRuneHookFunc returns a DecodeHookFunc that converts
// strings to rune.
func StringToRuneHookFunc() DecodeHookFunc {
return StringToInt32HookFunc()
}
// StringToComplex64HookFunc returns a DecodeHookFunc that converts
// strings to complex64.
func StringToComplex64HookFunc() DecodeHookFunc {
return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
if f.Kind() != reflect.String || t.Kind() != reflect.Complex64 {
return data, nil
}
// Convert it by parsing
c128, err := strconv.ParseComplex(data.(string), 64)
return complex64(c128), err
}
}
// StringToComplex128HookFunc returns a DecodeHookFunc that converts
// strings to complex128.
func StringToComplex128HookFunc() DecodeHookFunc {
return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
if f.Kind() != reflect.String || t.Kind() != reflect.Complex128 {
return data, nil
}
// Convert it by parsing
return strconv.ParseComplex(data.(string), 128)
}
}

472
vendor/github.com/go-viper/mapstructure/v2/flake.lock generated vendored Normal file
View File

@@ -0,0 +1,472 @@
{
"nodes": {
"cachix": {
"inputs": {
"devenv": "devenv_2",
"flake-compat": [
"devenv",
"flake-compat"
],
"nixpkgs": [
"devenv",
"nixpkgs"
],
"pre-commit-hooks": [
"devenv",
"pre-commit-hooks"
]
},
"locked": {
"lastModified": 1712055811,
"narHash": "sha256-7FcfMm5A/f02yyzuavJe06zLa9hcMHsagE28ADcmQvk=",
"owner": "cachix",
"repo": "cachix",
"rev": "02e38da89851ec7fec3356a5c04bc8349cae0e30",
"type": "github"
},
"original": {
"owner": "cachix",
"repo": "cachix",
"type": "github"
}
},
"devenv": {
"inputs": {
"cachix": "cachix",
"flake-compat": "flake-compat_2",
"nix": "nix_2",
"nixpkgs": "nixpkgs_2",
"pre-commit-hooks": "pre-commit-hooks"
},
"locked": {
"lastModified": 1717245169,
"narHash": "sha256-+mW3rTBjGU8p1THJN0lX/Dd/8FbnF+3dB+mJuSaxewE=",
"owner": "cachix",
"repo": "devenv",
"rev": "c3f9f053c077c6f88a3de5276d9178c62baa3fc3",
"type": "github"
},
"original": {
"owner": "cachix",
"repo": "devenv",
"type": "github"
}
},
"devenv_2": {
"inputs": {
"flake-compat": [
"devenv",
"cachix",
"flake-compat"
],
"nix": "nix",
"nixpkgs": "nixpkgs",
"poetry2nix": "poetry2nix",
"pre-commit-hooks": [
"devenv",
"cachix",
"pre-commit-hooks"
]
},
"locked": {
"lastModified": 1708704632,
"narHash": "sha256-w+dOIW60FKMaHI1q5714CSibk99JfYxm0CzTinYWr+Q=",
"owner": "cachix",
"repo": "devenv",
"rev": "2ee4450b0f4b95a1b90f2eb5ffea98b90e48c196",
"type": "github"
},
"original": {
"owner": "cachix",
"ref": "python-rewrite",
"repo": "devenv",
"type": "github"
}
},
"flake-compat": {
"flake": false,
"locked": {
"lastModified": 1673956053,
"narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9",
"type": "github"
},
"original": {
"owner": "edolstra",
"repo": "flake-compat",
"type": "github"
}
},
"flake-compat_2": {
"flake": false,
"locked": {
"lastModified": 1696426674,
"narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "0f9255e01c2351cc7d116c072cb317785dd33b33",
"type": "github"
},
"original": {
"owner": "edolstra",
"repo": "flake-compat",
"type": "github"
}
},
"flake-parts": {
"inputs": {
"nixpkgs-lib": "nixpkgs-lib"
},
"locked": {
"lastModified": 1717285511,
"narHash": "sha256-iKzJcpdXih14qYVcZ9QC9XuZYnPc6T8YImb6dX166kw=",
"owner": "hercules-ci",
"repo": "flake-parts",
"rev": "2a55567fcf15b1b1c7ed712a2c6fadaec7412ea8",
"type": "github"
},
"original": {
"owner": "hercules-ci",
"repo": "flake-parts",
"type": "github"
}
},
"flake-utils": {
"inputs": {
"systems": "systems"
},
"locked": {
"lastModified": 1689068808,
"narHash": "sha256-6ixXo3wt24N/melDWjq70UuHQLxGV8jZvooRanIHXw0=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "919d646de7be200f3bf08cb76ae1f09402b6f9b4",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"flake-utils_2": {
"inputs": {
"systems": "systems_2"
},
"locked": {
"lastModified": 1710146030,
"narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"gitignore": {
"inputs": {
"nixpkgs": [
"devenv",
"pre-commit-hooks",
"nixpkgs"
]
},
"locked": {
"lastModified": 1709087332,
"narHash": "sha256-HG2cCnktfHsKV0s4XW83gU3F57gaTljL9KNSuG6bnQs=",
"owner": "hercules-ci",
"repo": "gitignore.nix",
"rev": "637db329424fd7e46cf4185293b9cc8c88c95394",
"type": "github"
},
"original": {
"owner": "hercules-ci",
"repo": "gitignore.nix",
"type": "github"
}
},
"nix": {
"inputs": {
"flake-compat": "flake-compat",
"nixpkgs": [
"devenv",
"cachix",
"devenv",
"nixpkgs"
],
"nixpkgs-regression": "nixpkgs-regression"
},
"locked": {
"lastModified": 1712911606,
"narHash": "sha256-BGvBhepCufsjcUkXnEEXhEVjwdJAwPglCC2+bInc794=",
"owner": "domenkozar",
"repo": "nix",
"rev": "b24a9318ea3f3600c1e24b4a00691ee912d4de12",
"type": "github"
},
"original": {
"owner": "domenkozar",
"ref": "devenv-2.21",
"repo": "nix",
"type": "github"
}
},
"nix-github-actions": {
"inputs": {
"nixpkgs": [
"devenv",
"cachix",
"devenv",
"poetry2nix",
"nixpkgs"
]
},
"locked": {
"lastModified": 1688870561,
"narHash": "sha256-4UYkifnPEw1nAzqqPOTL2MvWtm3sNGw1UTYTalkTcGY=",
"owner": "nix-community",
"repo": "nix-github-actions",
"rev": "165b1650b753316aa7f1787f3005a8d2da0f5301",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "nix-github-actions",
"type": "github"
}
},
"nix_2": {
"inputs": {
"flake-compat": [
"devenv",
"flake-compat"
],
"nixpkgs": [
"devenv",
"nixpkgs"
],
"nixpkgs-regression": "nixpkgs-regression_2"
},
"locked": {
"lastModified": 1712911606,
"narHash": "sha256-BGvBhepCufsjcUkXnEEXhEVjwdJAwPglCC2+bInc794=",
"owner": "domenkozar",
"repo": "nix",
"rev": "b24a9318ea3f3600c1e24b4a00691ee912d4de12",
"type": "github"
},
"original": {
"owner": "domenkozar",
"ref": "devenv-2.21",
"repo": "nix",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1692808169,
"narHash": "sha256-x9Opq06rIiwdwGeK2Ykj69dNc2IvUH1fY55Wm7atwrE=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "9201b5ff357e781bf014d0330d18555695df7ba8",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixpkgs-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs-lib": {
"locked": {
"lastModified": 1717284937,
"narHash": "sha256-lIbdfCsf8LMFloheeE6N31+BMIeixqyQWbSr2vk79EQ=",
"type": "tarball",
"url": "https://github.com/NixOS/nixpkgs/archive/eb9ceca17df2ea50a250b6b27f7bf6ab0186f198.tar.gz"
},
"original": {
"type": "tarball",
"url": "https://github.com/NixOS/nixpkgs/archive/eb9ceca17df2ea50a250b6b27f7bf6ab0186f198.tar.gz"
}
},
"nixpkgs-regression": {
"locked": {
"lastModified": 1643052045,
"narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
"type": "github"
},
"original": {
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
"type": "github"
}
},
"nixpkgs-regression_2": {
"locked": {
"lastModified": 1643052045,
"narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
"type": "github"
},
"original": {
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
"type": "github"
}
},
"nixpkgs-stable": {
"locked": {
"lastModified": 1710695816,
"narHash": "sha256-3Eh7fhEID17pv9ZxrPwCLfqXnYP006RKzSs0JptsN84=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "614b4613980a522ba49f0d194531beddbb7220d3",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-23.11",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs_2": {
"locked": {
"lastModified": 1713361204,
"narHash": "sha256-TA6EDunWTkc5FvDCqU3W2T3SFn0gRZqh6D/hJnM02MM=",
"owner": "cachix",
"repo": "devenv-nixpkgs",
"rev": "285676e87ad9f0ca23d8714a6ab61e7e027020c6",
"type": "github"
},
"original": {
"owner": "cachix",
"ref": "rolling",
"repo": "devenv-nixpkgs",
"type": "github"
}
},
"nixpkgs_3": {
"locked": {
"lastModified": 1717112898,
"narHash": "sha256-7R2ZvOnvd9h8fDd65p0JnB7wXfUvreox3xFdYWd1BnY=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "6132b0f6e344ce2fe34fc051b72fb46e34f668e0",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixpkgs-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"poetry2nix": {
"inputs": {
"flake-utils": "flake-utils",
"nix-github-actions": "nix-github-actions",
"nixpkgs": [
"devenv",
"cachix",
"devenv",
"nixpkgs"
]
},
"locked": {
"lastModified": 1692876271,
"narHash": "sha256-IXfZEkI0Mal5y1jr6IRWMqK8GW2/f28xJenZIPQqkY0=",
"owner": "nix-community",
"repo": "poetry2nix",
"rev": "d5006be9c2c2417dafb2e2e5034d83fabd207ee3",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "poetry2nix",
"type": "github"
}
},
"pre-commit-hooks": {
"inputs": {
"flake-compat": [
"devenv",
"flake-compat"
],
"flake-utils": "flake-utils_2",
"gitignore": "gitignore",
"nixpkgs": [
"devenv",
"nixpkgs"
],
"nixpkgs-stable": "nixpkgs-stable"
},
"locked": {
"lastModified": 1713775815,
"narHash": "sha256-Wu9cdYTnGQQwtT20QQMg7jzkANKQjwBD9iccfGKkfls=",
"owner": "cachix",
"repo": "pre-commit-hooks.nix",
"rev": "2ac4dcbf55ed43f3be0bae15e181f08a57af24a4",
"type": "github"
},
"original": {
"owner": "cachix",
"repo": "pre-commit-hooks.nix",
"type": "github"
}
},
"root": {
"inputs": {
"devenv": "devenv",
"flake-parts": "flake-parts",
"nixpkgs": "nixpkgs_3"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
},
"systems_2": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
}
},
"root": "root",
"version": 7
}

39
vendor/github.com/go-viper/mapstructure/v2/flake.nix generated vendored Normal file
View File

@@ -0,0 +1,39 @@
{
inputs = {
nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable";
flake-parts.url = "github:hercules-ci/flake-parts";
devenv.url = "github:cachix/devenv";
};
outputs = inputs@{ flake-parts, ... }:
flake-parts.lib.mkFlake { inherit inputs; } {
imports = [
inputs.devenv.flakeModule
];
systems = [ "x86_64-linux" "x86_64-darwin" "aarch64-darwin" ];
perSystem = { config, self', inputs', pkgs, system, ... }: rec {
devenv.shells = {
default = {
languages = {
go.enable = true;
};
pre-commit.hooks = {
nixpkgs-fmt.enable = true;
};
packages = with pkgs; [
golangci-lint
];
# https://github.com/cachix/devenv/issues/528#issuecomment-1556108767
containers = pkgs.lib.mkForce { };
};
ci = devenv.shells.default;
};
};
};
}

View File

@@ -0,0 +1,11 @@
package errors
import "errors"
func New(text string) error {
return errors.New(text)
}
func As(err error, target interface{}) bool {
return errors.As(err, target)
}

View File

@@ -0,0 +1,9 @@
//go:build go1.20
package errors
import "errors"
func Join(errs ...error) error {
return errors.Join(errs...)
}

View File

@@ -0,0 +1,61 @@
//go:build !go1.20
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package errors
// Join returns an error that wraps the given errors.
// Any nil error values are discarded.
// Join returns nil if every value in errs is nil.
// The error formats as the concatenation of the strings obtained
// by calling the Error method of each element of errs, with a newline
// between each string.
//
// A non-nil error returned by Join implements the Unwrap() []error method.
func Join(errs ...error) error {
n := 0
for _, err := range errs {
if err != nil {
n++
}
}
if n == 0 {
return nil
}
e := &joinError{
errs: make([]error, 0, n),
}
for _, err := range errs {
if err != nil {
e.errs = append(e.errs, err)
}
}
return e
}
type joinError struct {
errs []error
}
func (e *joinError) Error() string {
// Since Join returns nil if every value in errs is nil,
// e.errs cannot be empty.
if len(e.errs) == 1 {
return e.errs[0].Error()
}
b := []byte(e.errs[0].Error())
for _, err := range e.errs[1:] {
b = append(b, '\n')
b = append(b, err.Error()...)
}
// At this point, b has at least one byte '\n'.
// return unsafe.String(&b[0], len(b))
return string(b)
}
func (e *joinError) Unwrap() []error {
return e.errs
}

File diff suppressed because it is too large

View File

@@ -0,0 +1,44 @@
//go:build !go1.20
package mapstructure
import "reflect"
func isComparable(v reflect.Value) bool {
k := v.Kind()
switch k {
case reflect.Invalid:
return false
case reflect.Array:
switch v.Type().Elem().Kind() {
case reflect.Interface, reflect.Array, reflect.Struct:
for i := 0; i < v.Type().Len(); i++ {
// if !v.Index(i).Comparable() {
if !isComparable(v.Index(i)) {
return false
}
}
return true
}
return v.Type().Comparable()
case reflect.Interface:
// return v.Elem().Comparable()
return isComparable(v.Elem())
case reflect.Struct:
for i := 0; i < v.NumField(); i++ {
// if !v.Field(i).Comparable() {
if !isComparable(v.Field(i)) {
return false
}
}
return true
default:
return v.Type().Comparable()
}
}

View File

@@ -0,0 +1,10 @@
//go:build go1.20
package mapstructure
import "reflect"
// TODO: remove once we drop support for Go <1.20
func isComparable(v reflect.Value) bool {
return v.Comparable()
}

4
vendor/github.com/mattn/go-sqlite3/.codecov.yml generated vendored Normal file
View File

@@ -0,0 +1,4 @@
coverage:
status:
project: off
patch: off

14
vendor/github.com/mattn/go-sqlite3/.gitignore generated vendored Normal file
View File

@@ -0,0 +1,14 @@
*.db
*.exe
*.dll
*.o
# VSCode
.vscode
# Exclude from upgrade
upgrade/*.c
upgrade/*.h
# Exclude upgrade binary
upgrade/upgrade

21
vendor/github.com/mattn/go-sqlite3/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2014 Yasuhiro Matsumoto
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

598
vendor/github.com/mattn/go-sqlite3/README.md generated vendored Normal file
View File

@@ -0,0 +1,598 @@
go-sqlite3
==========
[![Go Reference](https://pkg.go.dev/badge/github.com/mattn/go-sqlite3.svg)](https://pkg.go.dev/github.com/mattn/go-sqlite3)
[![GitHub Actions](https://github.com/mattn/go-sqlite3/workflows/Go/badge.svg)](https://github.com/mattn/go-sqlite3/actions?query=workflow%3AGo)
[![Financial Contributors on Open Collective](https://opencollective.com/mattn-go-sqlite3/all/badge.svg?label=financial+contributors)](https://opencollective.com/mattn-go-sqlite3)
[![codecov](https://codecov.io/gh/mattn/go-sqlite3/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-sqlite3)
[![Go Report Card](https://goreportcard.com/badge/github.com/mattn/go-sqlite3)](https://goreportcard.com/report/github.com/mattn/go-sqlite3)
Latest stable version is v1.14 or later, not v2.
~~**NOTE:** The increase to v2 was an accident. There were no major changes or features.~~
# Description
A sqlite3 driver that conforms to the built-in database/sql interface.
Supported Golang version: See [.github/workflows/go.yaml](./.github/workflows/go.yaml).
This package follows the official [Golang Release Policy](https://golang.org/doc/devel/release.html#policy).
### Overview
- [go-sqlite3](#go-sqlite3)
- [Description](#description)
- [Overview](#overview)
- [Installation](#installation)
- [API Reference](#api-reference)
- [Connection String](#connection-string)
- [DSN Examples](#dsn-examples)
- [Features](#features)
- [Usage](#usage)
- [Feature / Extension List](#feature--extension-list)
- [Compilation](#compilation)
- [Android](#android)
- [ARM](#arm)
- [Cross Compile](#cross-compile)
- [Compiling](#compiling)
- [Linux](#linux)
- [Alpine](#alpine)
- [Fedora](#fedora)
- [Ubuntu](#ubuntu)
- [macOS](#mac-osx)
- [Windows](#windows)
- [Errors](#errors)
- [User Authentication](#user-authentication)
- [Compile](#compile)
- [Usage](#usage-1)
- [Create protected database](#create-protected-database)
- [Password Encoding](#password-encoding)
- [Available Encoders](#available-encoders)
- [Restrictions](#restrictions)
- [Support](#support)
- [User Management](#user-management)
- [SQL](#sql)
- [Examples](#examples)
- [*SQLiteConn](#sqliteconn)
- [Attached database](#attached-database)
- [Extensions](#extensions)
- [Spatialite](#spatialite)
- [FAQ](#faq)
- [License](#license)
- [Author](#author)
# Installation
This package can be installed with the `go get` command:
go get github.com/mattn/go-sqlite3
_go-sqlite3_ is a *cgo* package.
If you want to build your app using go-sqlite3, you need gcc.
***Important: because this is a `CGO` enabled package, you are required to set the environment variable `CGO_ENABLED=1` and have a `gcc` compiler present within your path.***
# API Reference
API documentation can be found [here](http://godoc.org/github.com/mattn/go-sqlite3).
Examples can be found under the [examples](./_example) directory.
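Before the details below, a bare-minimum use of the driver through `database/sql` looks something like this (a sketch; the file name and schema are arbitrary):

```go
package main

import (
	"database/sql"
	"fmt"

	_ "github.com/mattn/go-sqlite3" // registers the "sqlite3" driver
)

func main() {
	db, err := sql.Open("sqlite3", "./test.db")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	if _, err := db.Exec(`CREATE TABLE IF NOT EXISTS t (id INTEGER PRIMARY KEY, name TEXT)`); err != nil {
		panic(err)
	}

	var n int
	if err := db.QueryRow(`SELECT count(*) FROM t`).Scan(&n); err != nil {
		panic(err)
	}
	fmt.Println("rows:", n)
}
```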
# Connection String
When creating a new SQLite database or connecting to an existing one, additional options can be given along with the file name.
This is also known as a DSN (Data Source Name) string.
Options are appended after the filename of the SQLite database.
The database filename and options are separated by a `?` (question mark).
Options should be URL-encoded (see [url.QueryEscape](https://golang.org/pkg/net/url/#QueryEscape)).
This also applies when using an in-memory database instead of a file.
Options can be given using the following format: `KEYWORD=VALUE` and multiple options can be combined with the `&` ampersand.
This library supports DSN options of SQLite itself and provides additional options.
Boolean values can be one of:
* `0` `no` `false` `off`
* `1` `yes` `true` `on`
| Name | Key | Value(s) | Description |
|------|-----|----------|-------------|
| UA - Create | `_auth` | - | Create User Authentication, for more information see [User Authentication](#user-authentication) |
| UA - Username | `_auth_user` | `string` | Username for User Authentication, for more information see [User Authentication](#user-authentication) |
| UA - Password | `_auth_pass` | `string` | Password for User Authentication, for more information see [User Authentication](#user-authentication) |
| UA - Crypt | `_auth_crypt` | <ul><li>SHA1</li><li>SSHA1</li><li>SHA256</li><li>SSHA256</li><li>SHA384</li><li>SSHA384</li><li>SHA512</li><li>SSHA512</li></ul> | Password encoder to use for User Authentication, for more information see [User Authentication](#user-authentication) |
| UA - Salt | `_auth_salt` | `string` | Salt to use if the configure password encoder requires a salt, for User Authentication, for more information see [User Authentication](#user-authentication) |
| Auto Vacuum | `_auto_vacuum` \| `_vacuum` | <ul><li>`0` \| `none`</li><li>`1` \| `full`</li><li>`2` \| `incremental`</li></ul> | For more information see [PRAGMA auto_vacuum](https://www.sqlite.org/pragma.html#pragma_auto_vacuum) |
| Busy Timeout | `_busy_timeout` \| `_timeout` | `int` | Specify value for sqlite3_busy_timeout. For more information see [PRAGMA busy_timeout](https://www.sqlite.org/pragma.html#pragma_busy_timeout) |
| Case Sensitive LIKE | `_case_sensitive_like` \| `_cslike` | `boolean` | For more information see [PRAGMA case_sensitive_like](https://www.sqlite.org/pragma.html#pragma_case_sensitive_like) |
| Defer Foreign Keys | `_defer_foreign_keys` \| `_defer_fk` | `boolean` | For more information see [PRAGMA defer_foreign_keys](https://www.sqlite.org/pragma.html#pragma_defer_foreign_keys) |
| Foreign Keys | `_foreign_keys` \| `_fk` | `boolean` | For more information see [PRAGMA foreign_keys](https://www.sqlite.org/pragma.html#pragma_foreign_keys) |
| Ignore CHECK Constraints | `_ignore_check_constraints` | `boolean` | For more information see [PRAGMA ignore_check_constraints](https://www.sqlite.org/pragma.html#pragma_ignore_check_constraints) |
| Immutable | `immutable` | `boolean` | For more information see [Immutable](https://www.sqlite.org/c3ref/open.html) |
| Journal Mode | `_journal_mode` \| `_journal` | <ul><li>DELETE</li><li>TRUNCATE</li><li>PERSIST</li><li>MEMORY</li><li>WAL</li><li>OFF</li></ul> | For more information see [PRAGMA journal_mode](https://www.sqlite.org/pragma.html#pragma_journal_mode) |
| Locking Mode | `_locking_mode` \| `_locking` | <ul><li>NORMAL</li><li>EXCLUSIVE</li></ul> | For more information see [PRAGMA locking_mode](https://www.sqlite.org/pragma.html#pragma_locking_mode) |
| Mode | `mode` | <ul><li>ro</li><li>rw</li><li>rwc</li><li>memory</li></ul> | Access Mode of the database. For more information see [SQLite Open](https://www.sqlite.org/c3ref/open.html) |
| Mutex Locking | `_mutex` | <ul><li>no</li><li>full</li></ul> | Specify mutex mode. |
| Query Only | `_query_only` | `boolean` | For more information see [PRAGMA query_only](https://www.sqlite.org/pragma.html#pragma_query_only) |
| Recursive Triggers | `_recursive_triggers` \| `_rt` | `boolean` | For more information see [PRAGMA recursive_triggers](https://www.sqlite.org/pragma.html#pragma_recursive_triggers) |
| Secure Delete | `_secure_delete` | `boolean` \| `FAST` | For more information see [PRAGMA secure_delete](https://www.sqlite.org/pragma.html#pragma_secure_delete) |
| Shared-Cache Mode | `cache` | <ul><li>shared</li><li>private</li></ul> | Set cache mode for more information see [sqlite.org](https://www.sqlite.org/sharedcache.html) |
| Synchronous | `_synchronous` \| `_sync` | <ul><li>0 \| OFF</li><li>1 \| NORMAL</li><li>2 \| FULL</li><li>3 \| EXTRA</li></ul> | For more information see [PRAGMA synchronous](https://www.sqlite.org/pragma.html#pragma_synchronous) |
| Time Zone Location | `_loc` | auto | Specify the location used when parsing time values. |
| Transaction Lock | `_txlock` | <ul><li>immediate</li><li>deferred</li><li>exclusive</li></ul> | Specify locking behavior for transactions. |
| Writable Schema | `_writable_schema` | `Boolean` | When this pragma is on, the sqlite_master table of the database can be changed using ordinary UPDATE, INSERT, and DELETE statements. Warning: misuse of this pragma can easily result in a corrupt database file. |
| Cache Size | `_cache_size` | `int` | Maximum cache size; default is 2000K (2M). See [PRAGMA cache_size](https://sqlite.org/pragma.html#pragma_cache_size) |
## DSN Examples
```
file:test.db?cache=shared&mode=memory
```
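A hedged Go sketch of combining several DSN options; the file name and the chosen options are illustrative only (imports as in the earlier sketch):

```go
// Enable WAL journaling, a 5-second busy timeout, and foreign keys.
dsn := "file:test.db?_journal_mode=WAL&_busy_timeout=5000&_foreign_keys=true"
db, err := sql.Open("sqlite3", dsn)
if err != nil {
	log.Fatal(err)
}
defer db.Close()
```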
# Features
This package allows additional configuration of features available within SQLite3 to be enabled or disabled by golang build constraints also known as build `tags`.
Click [here](https://golang.org/pkg/go/build/#hdr-Build_Constraints) for more information about build tags / constraints.
### Usage
If you wish to build this library with additional extensions / features, use the following command:
```bash
go build -tags "<FEATURE>"
```
For available features, see the extension list.
When using multiple build tags, all the different tags should be space delimited.
Example:
```bash
go build -tags "icu json1 fts5 secure_delete"
```
### Feature / Extension List
| Extension | Build Tag | Description |
|-----------|-----------|-------------|
| Additional Statistics | sqlite_stat4 | This option adds additional logic to the ANALYZE command and to the query planner that can help SQLite to choose a better query plan under certain situations. The ANALYZE command is enhanced to collect histogram data from all columns of every index and store that data in the sqlite_stat4 table.<br><br>The query planner will then use the histogram data to help it make better index choices. The downside of this compile-time option is that it violates the query planner stability guarantee, making it more difficult to ensure consistent performance in mass-produced applications.<br><br>SQLITE_ENABLE_STAT4 is an enhancement of SQLITE_ENABLE_STAT3. STAT3 only recorded histogram data for the left-most column of each index, whereas the STAT4 enhancement records histogram data from all columns of each index.<br><br>The SQLITE_ENABLE_STAT3 compile-time option is a no-op and is ignored if the SQLITE_ENABLE_STAT4 compile-time option is used |
| Allow URI Authority | sqlite_allow_uri_authority | URI filenames normally throw an error if the authority section is not either empty or "localhost".<br><br>However, if SQLite is compiled with the SQLITE_ALLOW_URI_AUTHORITY compile-time option, then the URI is converted into a Uniform Naming Convention (UNC) filename and passed down to the underlying operating system that way |
| App Armor | sqlite_app_armor | When defined, this C-preprocessor macro activates extra code that attempts to detect misuse of the SQLite API, such as passing in NULL pointers to required parameters or using objects after they have been destroyed. <br><br>App Armor is not available under `Windows`. |
| Disable Load Extensions | sqlite_omit_load_extension | Loading of external extensions is enabled by default.<br><br>To disable extension loading add the build tag `sqlite_omit_load_extension`. |
| Enable Serialization with `libsqlite3` | sqlite_serialize | Serialization and deserialization of a SQLite database is available by default, unless the build tag `libsqlite3` is set.<br><br>To enable this functionality even if `libsqlite3` is set, add the build tag `sqlite_serialize`. |
| Foreign Keys | sqlite_foreign_keys | This macro determines whether enforcement of foreign key constraints is enabled or disabled by default for new database connections.<br><br>Each database connection can always turn enforcement of foreign key constraints on and off and run-time using the foreign_keys pragma.<br><br>Enforcement of foreign key constraints is normally off by default, but if this compile-time parameter is set to 1, enforcement of foreign key constraints will be on by default |
| Full Auto Vacuum | sqlite_vacuum_full | Set the default auto vacuum to full |
| Incremental Auto Vacuum | sqlite_vacuum_incr | Set the default auto vacuum to incremental |
| Full Text Search Engine | sqlite_fts5 | When this option is defined in the amalgamation, version 5 of the full-text search engine (FTS5) is added to the build automatically |
| International Components for Unicode | sqlite_icu | This option causes the International Components for Unicode or "ICU" extension to SQLite to be added to the build |
| Introspect PRAGMAS | sqlite_introspect | This option adds some extra PRAGMA statements. <ul><li>PRAGMA function_list</li><li>PRAGMA module_list</li><li>PRAGMA pragma_list</li></ul> |
| JSON SQL Functions | sqlite_json | When this option is defined in the amalgamation, the JSON SQL functions are added to the build automatically |
| Math Functions | sqlite_math_functions | This compile-time option enables built-in scalar math functions. For more information see [Built-In Mathematical SQL Functions](https://www.sqlite.org/lang_mathfunc.html) |
| OS Trace | sqlite_os_trace | This option enables OSTRACE() debug logging. This can be verbose and should not be used in production. |
| Pre Update Hook | sqlite_preupdate_hook | Registers a callback function that is invoked prior to each INSERT, UPDATE, and DELETE operation on a database table. |
| Secure Delete | sqlite_secure_delete | This compile-time option changes the default setting of the secure_delete pragma.<br><br>When this option is not used, secure_delete defaults to off. When this option is present, secure_delete defaults to on.<br><br>The secure_delete setting causes deleted content to be overwritten with zeros. There is a small performance penalty since additional I/O must occur.<br><br>On the other hand, secure_delete can prevent fragments of sensitive information from lingering in unused parts of the database file after it has been deleted. See the documentation on the secure_delete pragma for additional information |
| Secure Delete (FAST) | sqlite_secure_delete_fast | For more information see [PRAGMA secure_delete](https://www.sqlite.org/pragma.html#pragma_secure_delete) |
| Tracing / Debug | sqlite_trace | Activate trace functions |
| User Authentication | sqlite_userauth | SQLite User Authentication see [User Authentication](#user-authentication) for more information. |
| Virtual Tables | sqlite_vtable | SQLite Virtual Tables see [SQLite Official VTABLE Documentation](https://www.sqlite.org/vtab.html) for more information, and a [full example here](https://github.com/mattn/go-sqlite3/tree/master/_example/vtable) |
# Compilation
This package requires the `CGO_ENABLED=1` environment variable if not set by default, and the presence of the `gcc` compiler.
If you need to add additional CFLAGS or LDFLAGS to the build command, and do not want to modify this package, then this can be achieved by using the `CGO_CFLAGS` and `CGO_LDFLAGS` environment variables.
## Android
This package can be compiled for android.
Compile with:
```bash
go build -tags "android"
```
For more information see [#201](https://github.com/mattn/go-sqlite3/issues/201)
# ARM
To compile for `ARM` use the following environment:
```bash
env CC=arm-linux-gnueabihf-gcc CXX=arm-linux-gnueabihf-g++ \
CGO_ENABLED=1 GOOS=linux GOARCH=arm GOARM=7 \
go build -v
```
Additional information:
- [#242](https://github.com/mattn/go-sqlite3/issues/242)
- [#504](https://github.com/mattn/go-sqlite3/issues/504)
# Cross Compile
This library can be cross-compiled.
In some cases you are required to set the `CC` environment variable to the cross compiler.
## Cross Compiling from macOS
The simplest way to cross compile from macOS is to use [xgo](https://github.com/karalabe/xgo).
Alternatively, you can build manually with a musl cross toolchain:
- Install [musl-cross](https://github.com/FiloSottile/homebrew-musl-cross) (`brew install FiloSottile/musl-cross/musl-cross`).
- Run `CC=x86_64-linux-musl-gcc CXX=x86_64-linux-musl-g++ GOARCH=amd64 GOOS=linux CGO_ENABLED=1 go build -ldflags "-linkmode external -extldflags -static"`.
Please refer to the project's [README](https://github.com/FiloSottile/homebrew-musl-cross#readme) for further information.
# Compiling
## Linux
To compile this package on Linux, you must install the development tools for your Linux distribution.
To compile under Linux, use the build tag `linux`.
```bash
go build -tags "linux"
```
If you wish to link directly to libsqlite3 then you can use the `libsqlite3` build tag.
```
go build -tags "libsqlite3 linux"
```
### Alpine
When building in an `alpine` container run the following command before building:
```
apk add --update gcc musl-dev
```
### Fedora
```bash
sudo yum groupinstall "Development Tools" "Development Libraries"
```
### Ubuntu
```bash
sudo apt-get install build-essential
```
## macOS
macOS should have all the tools present to compile this package. If not, install Xcode to add all the developer tools.
Required dependency:
```bash
brew install sqlite3
```
For macOS, an additional package is required if you wish to build the `icu` extension.
It can be installed with `homebrew`:
```bash
brew upgrade icu4c
```
To compile for macOS on x86:
```bash
go build -tags "darwin amd64"
```
To compile for macOS on ARM chips:
```bash
go build -tags "darwin arm64"
```
If you wish to link directly to libsqlite3, use the `libsqlite3` build tag:
```
# x86
go build -tags "libsqlite3 darwin amd64"
# ARM
go build -tags "libsqlite3 darwin arm64"
```
Additional information:
- [#206](https://github.com/mattn/go-sqlite3/issues/206)
- [#404](https://github.com/mattn/go-sqlite3/issues/404)
## Windows
To compile this package on Windows, you must have the `gcc` compiler installed.
1) Install a Windows `gcc` toolchain, such as [TDM-GCC](https://jmeubank.github.io/tdm-gcc/).
2) Add the `bin` folder to the Windows path, if the installer did not do this by default.
3) Open a terminal for the TDM-GCC toolchain, which can be found in the Windows Start menu.
4) Navigate to your project folder and run the `go build ...` command for this package.
## Errors
- Compile error: `can not be used when making a shared object; recompile with -fPIC`
When receiving a compile-time error referencing recompile with `-fPIC`, you
are probably using a hardened system.
You can compile the library on a hardened system with the following command.
```bash
go build -ldflags '-extldflags=-fno-PIC'
```
For more details, see [#120](https://github.com/mattn/go-sqlite3/issues/120)
- Can't build go-sqlite3 on Windows 64-bit.
> You are probably using Go 1.0, which has a problem compiling/linking on 64-bit Windows.
> See: [#27](https://github.com/mattn/go-sqlite3/issues/27)
- `go get github.com/mattn/go-sqlite3` throws a compilation error.
`gcc` throws: `internal compiler error`
Remove the downloaded repository from your disk and try reinstalling with:
```bash
go install github.com/mattn/go-sqlite3
```
# User Authentication
This package supports the SQLite User Authentication module.
## Compile
To use the User authentication module, the package has to be compiled with the tag `sqlite_userauth`. See [Features](#features).
## Usage
### Create protected database
To create a database protected by user authentication, add the `_auth` option to the connection string.
This enables user authentication within the database. This option requires two additional arguments:
- `_auth_user`
- `_auth_pass`
When `_auth` is present in the connection string, user authentication will be enabled and the provided user will be created
as an `admin` user. After the initial creation, the `_auth` parameter has no effect and can be omitted from the connection string.
Example connection strings:
Create a user-authentication database with user `admin` and password `admin`:
`file:test.s3db?_auth&_auth_user=admin&_auth_pass=admin`
Create a user-authentication database with user `admin` and password `admin`, using `SHA1` for the password encoding:
`file:test.s3db?_auth&_auth_user=admin&_auth_pass=admin&_auth_crypt=sha1`
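A hedged Go sketch of connecting to an already-protected database (requires building with the `sqlite_userauth` tag; the file name and credentials are illustrative):

```go
// _auth is only needed at creation time; an existing protected database
// is opened with just the credentials.
db, err := sql.Open("sqlite3", "file:test.s3db?_auth_user=admin&_auth_pass=admin")
if err != nil {
	log.Fatal(err)
}
defer db.Close()
```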
### Password Encoding
The passwords within the user authentication module of SQLite are encoded with the SQLite function `sqlite_crypt`.
This function uses a Caesar cipher, which is quite insecure.
This library therefore provides several additional password encoders which can be configured through the connection string.
The password cipher can be configured with the key `_auth_crypt`, and if the configured password encoder also requires a
salt, this can be set with `_auth_salt`; see the sketch after the encoder list below.
#### Available Encoders
- SHA1
- SSHA1 (Salted SHA1)
- SHA256
- SSHA256 (salted SHA256)
- SHA384
- SSHA384 (salted SHA384)
- SHA512
- SSHA512 (salted SHA512)
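A hedged sketch of selecting a salted encoder through the DSN (the salt value is illustrative):

```go
// Create a protected database whose passwords are encoded with salted SHA256.
dsn := "file:test.s3db?_auth&_auth_user=admin&_auth_pass=admin" +
	"&_auth_crypt=ssha256&_auth_salt=mysalt"
db, err := sql.Open("sqlite3", dsn)
if err != nil {
	log.Fatal(err)
}
```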
### Restrictions
Operations on the database regarding user management can only be performed by an administrator user.
### Support
The user authentication module supports two kinds of users:
- administrators
- regular users
### User Management
User management can be done by directly using the `*SQLiteConn` or by SQL.
#### SQL
The following SQL functions are available for user management:
| Function | Arguments | Description |
|----------|-----------|-------------|
| `authenticate` | username `string`, password `string` | Authenticates a user. This is done by the connection itself and should not be called manually. |
| `auth_user_add` | username `string`, password `string`, admin `int` | Adds a user to the database.<br>If the database is not protected by user authentication, this enables it. The `admin` argument is an integer indicating whether the added user should be an administrator. Only administrators can add administrators. |
| `auth_user_change` | username `string`, password `string`, admin `int` | Modifies a user. Users can change their own password, but only an administrator can change the administrator flag. |
| `auth_user_delete` | username `string` | Deletes a user from the database. Can only be used by an administrator. The currently logged-in administrator cannot be deleted, to make sure there is always an administrator remaining. |
These functions return an integer:
- 0 (SQLITE_OK)
- 23 (SQLITE_AUTH): the operation failed due to failed authentication or insufficient privileges
##### Examples
```sql
-- Create an admin user
SELECT auth_user_add('admin2', 'admin2', 1);

-- Change the password for a user
SELECT auth_user_change('user', 'userpassword', 0);

-- Delete a user
SELECT auth_user_delete('user');
```
#### *SQLiteConn
The following functions are available for user authentication from the `*SQLiteConn`:
| Function | Description |
|----------|-------------|
| `Authenticate(username, password string) error` | Authenticate user |
| `AuthUserAdd(username, password string, admin bool) error` | Add user |
| `AuthUserChange(username, password string, admin bool) error` | Modify user |
| `AuthUserDelete(username string) error` | Delete user |
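A hedged sketch of calling the user-management API directly on the `*SQLiteConn`, using `database/sql.Conn.Raw` (Go >= 1.13); it assumes the `sqlite_userauth` build tag and imports of `context` and the `sqlite3` package, and the credentials are illustrative:

```go
conn, err := db.Conn(context.Background())
if err != nil {
	log.Fatal(err)
}
defer conn.Close()

err = conn.Raw(func(driverConn any) error {
	sqliteConn := driverConn.(*sqlite3.SQLiteConn)
	// Add a regular (non-admin) user.
	return sqliteConn.AuthUserAdd("reader", "s3cret", false)
})
if err != nil {
	log.Fatal(err)
}
```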
### Attached database
When using attached databases, SQLite will use the authentication from the `main` database for the attached database(s).
# Extensions
If you want your own extension to be listed here, or you want to add a reference to an extension, please submit an issue.
## Spatialite
Spatialite is available as an extension to SQLite, and can be used in combination with this repository.
For an example, see [shaxbee/go-spatialite](https://github.com/shaxbee/go-spatialite).
## extension-functions.c from SQLite3 Contrib
extension-functions.c is available as an extension to SQLite, and provides the following functions:
- Math: acos, asin, atan, atn2, atan2, acosh, asinh, atanh, difference, degrees, radians, cos, sin, tan, cot, cosh, sinh, tanh, coth, exp, log, log10, power, sign, sqrt, square, ceil, floor, pi.
- String: replicate, charindex, leftstr, rightstr, ltrim, rtrim, trim, replace, reverse, proper, padl, padr, padc, strfilter.
- Aggregate: stdev, variance, mode, median, lower_quartile, upper_quartile
For an example, see [dinedal/go-sqlite3-extension-functions](https://github.com/dinedal/go-sqlite3-extension-functions).
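A hedged sketch of loading such an extension by registering a custom driver; the extension library name `libsqlitefunctions` is an assumption about how you compiled it:

```go
sql.Register("sqlite3_with_extensions",
	&sqlite3.SQLiteDriver{
		Extensions: []string{
			"libsqlitefunctions", // shared library built from extension-functions.c
		},
	})

db, err := sql.Open("sqlite3_with_extensions", ":memory:")
if err != nil {
	log.Fatal(err)
}
defer db.Close()
```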
# FAQ
- Getting an insert error while a query is open.
> You can pass some arguments into the connection string, for example, a URI.
> See: [#39](https://github.com/mattn/go-sqlite3/issues/39)
- Do you want to cross compile? Using MinGW on Linux or macOS?
> See: [#106](https://github.com/mattn/go-sqlite3/issues/106)
> See also: http://www.limitlessfx.com/cross-compile-golang-app-for-windows-from-linux.html
- Want to get `time.Time` with the current locale?
Use `_loc=auto` in the SQLite3 filename schema, like `file:foo.db?_loc=auto`.
- Can I use this in multiple routines concurrently?
Yes for read-only access, but not for writes. See [#50](https://github.com/mattn/go-sqlite3/issues/50), [#51](https://github.com/mattn/go-sqlite3/issues/51), [#209](https://github.com/mattn/go-sqlite3/issues/209), [#274](https://github.com/mattn/go-sqlite3/issues/274).
- Why am I getting a `no such table` error?
Why is it racy if I use a `sql.Open("sqlite3", ":memory:")` database?
Each connection to `":memory:"` opens a brand new in-memory sql database, so if
the stdlib's sql engine happens to open another connection and you've only
specified `":memory:"`, that connection will see a brand new database. A
workaround is to use `"file::memory:?cache=shared"` (or `"file:foobar?mode=memory&cache=shared"`). Every
connection to this string will point to the same in-memory database.
Note that if the last database connection in the pool closes, the in-memory database is deleted. Make sure the [max idle connection limit](https://golang.org/pkg/database/sql/#DB.SetMaxIdleConns) is > 0, and the [connection lifetime](https://golang.org/pkg/database/sql/#DB.SetConnMaxLifetime) is infinite.
For more information see:
* [#204](https://github.com/mattn/go-sqlite3/issues/204)
* [#511](https://github.com/mattn/go-sqlite3/issues/511)
* https://www.sqlite.org/sharedcache.html#shared_cache_and_in_memory_databases
* https://www.sqlite.org/inmemorydb.html#sharedmemdb
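A hedged sketch of the workaround described above:

```go
// Every connection in the pool points at the same shared in-memory database.
db, err := sql.Open("sqlite3", "file::memory:?cache=shared")
if err != nil {
	log.Fatal(err)
}
db.SetMaxIdleConns(1)    // keep at least one connection alive
db.SetConnMaxLifetime(0) // reuse connections forever, so the database persists
```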
- Reading from the database with a large number of goroutines fails on macOS.
By default, macOS limits the number of files that can be open simultaneously OS-wide to 1000.
For more information, see [#289](https://github.com/mattn/go-sqlite3/issues/289)
- Trying to execute a `.` (dot) command throws an error.
Error: `Error: near ".": syntax error`
Dot commands are part of the sqlite3 CLI, not of this library.
You need to implement the functionality yourself or invoke the sqlite3 CLI.
For more information, see [#305](https://github.com/mattn/go-sqlite3/issues/305).
- Error: `database is locked`
If you get a `database is locked` error, try the following options.
Add `cache=shared` to the DSN:
Example:
```go
db, err := sql.Open("sqlite3", "file:locked.sqlite?cache=shared")
```
Then limit the `database/sql` connection pool to a single open connection:
```go
db.SetMaxOpenConns(1)
```
For more information, see [#209](https://github.com/mattn/go-sqlite3/issues/209).
## Contributors
### Code Contributors
This project exists thanks to all the people who [[contribute](CONTRIBUTING.md)].
<a href="https://github.com/mattn/go-sqlite3/graphs/contributors"><img src="https://opencollective.com/mattn-go-sqlite3/contributors.svg?width=890&button=false" /></a>
### Financial Contributors
Become a financial contributor and help us sustain our community. [[Contribute here](https://opencollective.com/mattn-go-sqlite3/contribute)].
#### Individuals
<a href="https://opencollective.com/mattn-go-sqlite3"><img src="https://opencollective.com/mattn-go-sqlite3/individuals.svg?width=890"></a>
#### Organizations
Support this project with your organization. Your logo will show up here with a link to your website. [[Contribute](https://opencollective.com/mattn-go-sqlite3/contribute)]
<a href="https://opencollective.com/mattn-go-sqlite3/organization/0/website"><img src="https://opencollective.com/mattn-go-sqlite3/organization/0/avatar.svg"></a>
<a href="https://opencollective.com/mattn-go-sqlite3/organization/1/website"><img src="https://opencollective.com/mattn-go-sqlite3/organization/1/avatar.svg"></a>
<a href="https://opencollective.com/mattn-go-sqlite3/organization/2/website"><img src="https://opencollective.com/mattn-go-sqlite3/organization/2/avatar.svg"></a>
<a href="https://opencollective.com/mattn-go-sqlite3/organization/3/website"><img src="https://opencollective.com/mattn-go-sqlite3/organization/3/avatar.svg"></a>
<a href="https://opencollective.com/mattn-go-sqlite3/organization/4/website"><img src="https://opencollective.com/mattn-go-sqlite3/organization/4/avatar.svg"></a>
<a href="https://opencollective.com/mattn-go-sqlite3/organization/5/website"><img src="https://opencollective.com/mattn-go-sqlite3/organization/5/avatar.svg"></a>
<a href="https://opencollective.com/mattn-go-sqlite3/organization/6/website"><img src="https://opencollective.com/mattn-go-sqlite3/organization/6/avatar.svg"></a>
<a href="https://opencollective.com/mattn-go-sqlite3/organization/7/website"><img src="https://opencollective.com/mattn-go-sqlite3/organization/7/avatar.svg"></a>
<a href="https://opencollective.com/mattn-go-sqlite3/organization/8/website"><img src="https://opencollective.com/mattn-go-sqlite3/organization/8/avatar.svg"></a>
<a href="https://opencollective.com/mattn-go-sqlite3/organization/9/website"><img src="https://opencollective.com/mattn-go-sqlite3/organization/9/avatar.svg"></a>
# License
MIT: http://mattn.mit-license.org/2018
sqlite3-binding.c, sqlite3-binding.h, sqlite3ext.h
The -binding suffix was added to avoid build failures under gccgo.
In this repository, those files are an amalgamation of code that was copied from SQLite3. The license of that code is the same as the license of SQLite3.
# Author
Yasuhiro Matsumoto (a.k.a mattn)
G.J.R. Timmer

85
vendor/github.com/mattn/go-sqlite3/backup.go generated vendored Normal file
View File

@@ -0,0 +1,85 @@
// Copyright (C) 2019 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package sqlite3
/*
#ifndef USE_LIBSQLITE3
#include "sqlite3-binding.h"
#else
#include <sqlite3.h>
#endif
#include <stdlib.h>
*/
import "C"
import (
"runtime"
"unsafe"
)
// SQLiteBackup implements the backup API.
type SQLiteBackup struct {
b *C.sqlite3_backup
}
// Backup makes a backup of the src database into dest.
func (destConn *SQLiteConn) Backup(dest string, srcConn *SQLiteConn, src string) (*SQLiteBackup, error) {
destptr := C.CString(dest)
defer C.free(unsafe.Pointer(destptr))
srcptr := C.CString(src)
defer C.free(unsafe.Pointer(srcptr))
if b := C.sqlite3_backup_init(destConn.db, destptr, srcConn.db, srcptr); b != nil {
bb := &SQLiteBackup{b: b}
runtime.SetFinalizer(bb, (*SQLiteBackup).Finish)
return bb, nil
}
return nil, destConn.lastError()
}
// Step runs one step of the backup by calling the underlying `sqlite3_backup_step`
// function. It returns a boolean indicating whether the backup is done
// and an error signalling any other failure. Done is returned if the underlying
// C function returns SQLITE_DONE (code 101).
func (b *SQLiteBackup) Step(p int) (bool, error) {
ret := C.sqlite3_backup_step(b.b, C.int(p))
if ret == C.SQLITE_DONE {
return true, nil
} else if ret != 0 && ret != C.SQLITE_LOCKED && ret != C.SQLITE_BUSY {
return false, Error{Code: ErrNo(ret)}
}
return false, nil
}
// Remaining returns the number of pages still to be backed up.
func (b *SQLiteBackup) Remaining() int {
return int(C.sqlite3_backup_remaining(b.b))
}
// PageCount returns the total number of pages in the source database.
func (b *SQLiteBackup) PageCount() int {
return int(C.sqlite3_backup_pagecount(b.b))
}
// Finish closes the backup.
func (b *SQLiteBackup) Finish() error {
return b.Close()
}
// Close closes the backup.
func (b *SQLiteBackup) Close() error {
ret := C.sqlite3_backup_finish(b.b)
// sqlite3_backup_finish() never fails, it just returns the
// error code from previous operations, so clean up before
// checking and returning an error
b.b = nil
runtime.SetFinalizer(b, nil)
if ret != 0 {
return Error{Code: ErrNo(ret)}
}
return nil
}

412
vendor/github.com/mattn/go-sqlite3/callback.go generated vendored Normal file
View File

@@ -0,0 +1,412 @@
// Copyright (C) 2019 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package sqlite3
// You can't export a Go function to C and have definitions in the C
// preamble in the same file, so we have to have callbackTrampoline in
// its own file. Because we need a separate file anyway, the support
// code for SQLite custom functions is in here.
/*
#ifndef USE_LIBSQLITE3
#include "sqlite3-binding.h"
#else
#include <sqlite3.h>
#endif
#include <stdlib.h>
void _sqlite3_result_text(sqlite3_context* ctx, const char* s);
void _sqlite3_result_blob(sqlite3_context* ctx, const void* b, int l);
*/
import "C"
import (
"errors"
"fmt"
"math"
"reflect"
"sync"
"unsafe"
)
//export callbackTrampoline
func callbackTrampoline(ctx *C.sqlite3_context, argc int, argv **C.sqlite3_value) {
args := (*[(math.MaxInt32 - 1) / unsafe.Sizeof((*C.sqlite3_value)(nil))]*C.sqlite3_value)(unsafe.Pointer(argv))[:argc:argc]
fi := lookupHandle(C.sqlite3_user_data(ctx)).(*functionInfo)
fi.Call(ctx, args)
}
//export stepTrampoline
func stepTrampoline(ctx *C.sqlite3_context, argc C.int, argv **C.sqlite3_value) {
args := (*[(math.MaxInt32 - 1) / unsafe.Sizeof((*C.sqlite3_value)(nil))]*C.sqlite3_value)(unsafe.Pointer(argv))[:int(argc):int(argc)]
ai := lookupHandle(C.sqlite3_user_data(ctx)).(*aggInfo)
ai.Step(ctx, args)
}
//export doneTrampoline
func doneTrampoline(ctx *C.sqlite3_context) {
ai := lookupHandle(C.sqlite3_user_data(ctx)).(*aggInfo)
ai.Done(ctx)
}
//export compareTrampoline
func compareTrampoline(handlePtr unsafe.Pointer, la C.int, a *C.char, lb C.int, b *C.char) C.int {
cmp := lookupHandle(handlePtr).(func(string, string) int)
return C.int(cmp(C.GoStringN(a, la), C.GoStringN(b, lb)))
}
//export commitHookTrampoline
func commitHookTrampoline(handle unsafe.Pointer) int {
callback := lookupHandle(handle).(func() int)
return callback()
}
//export rollbackHookTrampoline
func rollbackHookTrampoline(handle unsafe.Pointer) {
callback := lookupHandle(handle).(func())
callback()
}
//export updateHookTrampoline
func updateHookTrampoline(handle unsafe.Pointer, op int, db *C.char, table *C.char, rowid int64) {
callback := lookupHandle(handle).(func(int, string, string, int64))
callback(op, C.GoString(db), C.GoString(table), rowid)
}
//export authorizerTrampoline
func authorizerTrampoline(handle unsafe.Pointer, op int, arg1 *C.char, arg2 *C.char, arg3 *C.char) int {
callback := lookupHandle(handle).(func(int, string, string, string) int)
return callback(op, C.GoString(arg1), C.GoString(arg2), C.GoString(arg3))
}
//export preUpdateHookTrampoline
func preUpdateHookTrampoline(handle unsafe.Pointer, dbHandle uintptr, op int, db *C.char, table *C.char, oldrowid int64, newrowid int64) {
hval := lookupHandleVal(handle)
data := SQLitePreUpdateData{
Conn: hval.db,
Op: op,
DatabaseName: C.GoString(db),
TableName: C.GoString(table),
OldRowID: oldrowid,
NewRowID: newrowid,
}
callback := hval.val.(func(SQLitePreUpdateData))
callback(data)
}
// Use handles to avoid passing Go pointers to C.
type handleVal struct {
db *SQLiteConn
val any
}
var handleLock sync.Mutex
var handleVals = make(map[unsafe.Pointer]handleVal)
func newHandle(db *SQLiteConn, v any) unsafe.Pointer {
handleLock.Lock()
defer handleLock.Unlock()
val := handleVal{db: db, val: v}
var p unsafe.Pointer = C.malloc(C.size_t(1))
if p == nil {
panic("can't allocate 'cgo-pointer hack index pointer': ptr == nil")
}
handleVals[p] = val
return p
}
func lookupHandleVal(handle unsafe.Pointer) handleVal {
handleLock.Lock()
defer handleLock.Unlock()
return handleVals[handle]
}
func lookupHandle(handle unsafe.Pointer) any {
return lookupHandleVal(handle).val
}
func deleteHandles(db *SQLiteConn) {
handleLock.Lock()
defer handleLock.Unlock()
for handle, val := range handleVals {
if val.db == db {
delete(handleVals, handle)
C.free(handle)
}
}
}
// This is only here so that tests can refer to it.
type callbackArgRaw C.sqlite3_value
type callbackArgConverter func(*C.sqlite3_value) (reflect.Value, error)
type callbackArgCast struct {
f callbackArgConverter
typ reflect.Type
}
func (c callbackArgCast) Run(v *C.sqlite3_value) (reflect.Value, error) {
val, err := c.f(v)
if err != nil {
return reflect.Value{}, err
}
if !val.Type().ConvertibleTo(c.typ) {
return reflect.Value{}, fmt.Errorf("cannot convert %s to %s", val.Type(), c.typ)
}
return val.Convert(c.typ), nil
}
func callbackArgInt64(v *C.sqlite3_value) (reflect.Value, error) {
if C.sqlite3_value_type(v) != C.SQLITE_INTEGER {
return reflect.Value{}, fmt.Errorf("argument must be an INTEGER")
}
return reflect.ValueOf(int64(C.sqlite3_value_int64(v))), nil
}
func callbackArgBool(v *C.sqlite3_value) (reflect.Value, error) {
if C.sqlite3_value_type(v) != C.SQLITE_INTEGER {
return reflect.Value{}, fmt.Errorf("argument must be an INTEGER")
}
i := int64(C.sqlite3_value_int64(v))
val := false
if i != 0 {
val = true
}
return reflect.ValueOf(val), nil
}
func callbackArgFloat64(v *C.sqlite3_value) (reflect.Value, error) {
if C.sqlite3_value_type(v) != C.SQLITE_FLOAT {
return reflect.Value{}, fmt.Errorf("argument must be a FLOAT")
}
return reflect.ValueOf(float64(C.sqlite3_value_double(v))), nil
}
func callbackArgBytes(v *C.sqlite3_value) (reflect.Value, error) {
switch C.sqlite3_value_type(v) {
case C.SQLITE_BLOB:
l := C.sqlite3_value_bytes(v)
p := C.sqlite3_value_blob(v)
return reflect.ValueOf(C.GoBytes(p, l)), nil
case C.SQLITE_TEXT:
l := C.sqlite3_value_bytes(v)
c := unsafe.Pointer(C.sqlite3_value_text(v))
return reflect.ValueOf(C.GoBytes(c, l)), nil
default:
return reflect.Value{}, fmt.Errorf("argument must be BLOB or TEXT")
}
}
func callbackArgString(v *C.sqlite3_value) (reflect.Value, error) {
switch C.sqlite3_value_type(v) {
case C.SQLITE_BLOB:
l := C.sqlite3_value_bytes(v)
p := (*C.char)(C.sqlite3_value_blob(v))
return reflect.ValueOf(C.GoStringN(p, l)), nil
case C.SQLITE_TEXT:
c := (*C.char)(unsafe.Pointer(C.sqlite3_value_text(v)))
return reflect.ValueOf(C.GoString(c)), nil
default:
return reflect.Value{}, fmt.Errorf("argument must be BLOB or TEXT")
}
}
func callbackArgGeneric(v *C.sqlite3_value) (reflect.Value, error) {
switch C.sqlite3_value_type(v) {
case C.SQLITE_INTEGER:
return callbackArgInt64(v)
case C.SQLITE_FLOAT:
return callbackArgFloat64(v)
case C.SQLITE_TEXT:
return callbackArgString(v)
case C.SQLITE_BLOB:
return callbackArgBytes(v)
case C.SQLITE_NULL:
// Interpret NULL as a nil byte slice.
var ret []byte
return reflect.ValueOf(ret), nil
default:
panic("unreachable")
}
}
func callbackArg(typ reflect.Type) (callbackArgConverter, error) {
switch typ.Kind() {
case reflect.Interface:
if typ.NumMethod() != 0 {
return nil, errors.New("the only supported interface type is any")
}
return callbackArgGeneric, nil
case reflect.Slice:
if typ.Elem().Kind() != reflect.Uint8 {
return nil, errors.New("the only supported slice type is []byte")
}
return callbackArgBytes, nil
case reflect.String:
return callbackArgString, nil
case reflect.Bool:
return callbackArgBool, nil
case reflect.Int64:
return callbackArgInt64, nil
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Int, reflect.Uint:
c := callbackArgCast{callbackArgInt64, typ}
return c.Run, nil
case reflect.Float64:
return callbackArgFloat64, nil
case reflect.Float32:
c := callbackArgCast{callbackArgFloat64, typ}
return c.Run, nil
default:
return nil, fmt.Errorf("don't know how to convert to %s", typ)
}
}
func callbackConvertArgs(argv []*C.sqlite3_value, converters []callbackArgConverter, variadic callbackArgConverter) ([]reflect.Value, error) {
var args []reflect.Value
if len(argv) < len(converters) {
return nil, fmt.Errorf("function requires at least %d arguments", len(converters))
}
for i, arg := range argv[:len(converters)] {
v, err := converters[i](arg)
if err != nil {
return nil, err
}
args = append(args, v)
}
if variadic != nil {
for _, arg := range argv[len(converters):] {
v, err := variadic(arg)
if err != nil {
return nil, err
}
args = append(args, v)
}
}
return args, nil
}
type callbackRetConverter func(*C.sqlite3_context, reflect.Value) error
func callbackRetInteger(ctx *C.sqlite3_context, v reflect.Value) error {
switch v.Type().Kind() {
case reflect.Int64:
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Int, reflect.Uint:
v = v.Convert(reflect.TypeOf(int64(0)))
case reflect.Bool:
b := v.Interface().(bool)
if b {
v = reflect.ValueOf(int64(1))
} else {
v = reflect.ValueOf(int64(0))
}
default:
return fmt.Errorf("cannot convert %s to INTEGER", v.Type())
}
C.sqlite3_result_int64(ctx, C.sqlite3_int64(v.Interface().(int64)))
return nil
}
func callbackRetFloat(ctx *C.sqlite3_context, v reflect.Value) error {
switch v.Type().Kind() {
case reflect.Float64:
case reflect.Float32:
v = v.Convert(reflect.TypeOf(float64(0)))
default:
return fmt.Errorf("cannot convert %s to FLOAT", v.Type())
}
C.sqlite3_result_double(ctx, C.double(v.Interface().(float64)))
return nil
}
func callbackRetBlob(ctx *C.sqlite3_context, v reflect.Value) error {
if v.Type().Kind() != reflect.Slice || v.Type().Elem().Kind() != reflect.Uint8 {
return fmt.Errorf("cannot convert %s to BLOB", v.Type())
}
i := v.Interface()
if i == nil || len(i.([]byte)) == 0 {
C.sqlite3_result_null(ctx)
} else {
bs := i.([]byte)
C._sqlite3_result_blob(ctx, unsafe.Pointer(&bs[0]), C.int(len(bs)))
}
return nil
}
func callbackRetText(ctx *C.sqlite3_context, v reflect.Value) error {
if v.Type().Kind() != reflect.String {
return fmt.Errorf("cannot convert %s to TEXT", v.Type())
}
cstr := C.CString(v.Interface().(string))
C._sqlite3_result_text(ctx, cstr)
return nil
}
func callbackRetNil(ctx *C.sqlite3_context, v reflect.Value) error {
return nil
}
func callbackRetGeneric(ctx *C.sqlite3_context, v reflect.Value) error {
if v.IsNil() {
C.sqlite3_result_null(ctx)
return nil
}
cb, err := callbackRet(v.Elem().Type())
if err != nil {
return err
}
return cb(ctx, v.Elem())
}
func callbackRet(typ reflect.Type) (callbackRetConverter, error) {
switch typ.Kind() {
case reflect.Interface:
errorInterface := reflect.TypeOf((*error)(nil)).Elem()
if typ.Implements(errorInterface) {
return callbackRetNil, nil
}
if typ.NumMethod() == 0 {
return callbackRetGeneric, nil
}
fallthrough
case reflect.Slice:
if typ.Elem().Kind() != reflect.Uint8 {
return nil, errors.New("the only supported slice type is []byte")
}
return callbackRetBlob, nil
case reflect.String:
return callbackRetText, nil
case reflect.Bool, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Int, reflect.Uint:
return callbackRetInteger, nil
case reflect.Float32, reflect.Float64:
return callbackRetFloat, nil
default:
return nil, fmt.Errorf("don't know how to convert to %s", typ)
}
}
func callbackError(ctx *C.sqlite3_context, err error) {
cstr := C.CString(err.Error())
defer C.free(unsafe.Pointer(cstr))
C.sqlite3_result_error(ctx, cstr, C.int(-1))
}
// Test support code. Tests are not allowed to import "C", so we can't
// declare any functions that use C.sqlite3_value.
func callbackSyntheticForTests(v reflect.Value, err error) callbackArgConverter {
return func(*C.sqlite3_value) (reflect.Value, error) {
return v, err
}
}

299
vendor/github.com/mattn/go-sqlite3/convert.go generated vendored Normal file
View File

@@ -0,0 +1,299 @@
// Extracted from Go database/sql source code
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Type conversions for Scan.
package sqlite3
import (
"database/sql"
"database/sql/driver"
"errors"
"fmt"
"reflect"
"strconv"
"time"
)
var errNilPtr = errors.New("destination pointer is nil") // embedded in descriptive error
// convertAssign copies to dest the value in src, converting it if possible.
// An error is returned if the copy would result in loss of information.
// dest should be a pointer type.
func convertAssign(dest, src any) error {
// Common cases, without reflect.
switch s := src.(type) {
case string:
switch d := dest.(type) {
case *string:
if d == nil {
return errNilPtr
}
*d = s
return nil
case *[]byte:
if d == nil {
return errNilPtr
}
*d = []byte(s)
return nil
case *sql.RawBytes:
if d == nil {
return errNilPtr
}
*d = append((*d)[:0], s...)
return nil
}
case []byte:
switch d := dest.(type) {
case *string:
if d == nil {
return errNilPtr
}
*d = string(s)
return nil
case *any:
if d == nil {
return errNilPtr
}
*d = cloneBytes(s)
return nil
case *[]byte:
if d == nil {
return errNilPtr
}
*d = cloneBytes(s)
return nil
case *sql.RawBytes:
if d == nil {
return errNilPtr
}
*d = s
return nil
}
case time.Time:
switch d := dest.(type) {
case *time.Time:
*d = s
return nil
case *string:
*d = s.Format(time.RFC3339Nano)
return nil
case *[]byte:
if d == nil {
return errNilPtr
}
*d = []byte(s.Format(time.RFC3339Nano))
return nil
case *sql.RawBytes:
if d == nil {
return errNilPtr
}
*d = s.AppendFormat((*d)[:0], time.RFC3339Nano)
return nil
}
case nil:
switch d := dest.(type) {
case *any:
if d == nil {
return errNilPtr
}
*d = nil
return nil
case *[]byte:
if d == nil {
return errNilPtr
}
*d = nil
return nil
case *sql.RawBytes:
if d == nil {
return errNilPtr
}
*d = nil
return nil
}
}
var sv reflect.Value
switch d := dest.(type) {
case *string:
sv = reflect.ValueOf(src)
switch sv.Kind() {
case reflect.Bool,
reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
reflect.Float32, reflect.Float64:
*d = asString(src)
return nil
}
case *[]byte:
sv = reflect.ValueOf(src)
if b, ok := asBytes(nil, sv); ok {
*d = b
return nil
}
case *sql.RawBytes:
sv = reflect.ValueOf(src)
if b, ok := asBytes([]byte(*d)[:0], sv); ok {
*d = sql.RawBytes(b)
return nil
}
case *bool:
bv, err := driver.Bool.ConvertValue(src)
if err == nil {
*d = bv.(bool)
}
return err
case *any:
*d = src
return nil
}
if scanner, ok := dest.(sql.Scanner); ok {
return scanner.Scan(src)
}
dpv := reflect.ValueOf(dest)
if dpv.Kind() != reflect.Ptr {
return errors.New("destination not a pointer")
}
if dpv.IsNil() {
return errNilPtr
}
if !sv.IsValid() {
sv = reflect.ValueOf(src)
}
dv := reflect.Indirect(dpv)
if sv.IsValid() && sv.Type().AssignableTo(dv.Type()) {
switch b := src.(type) {
case []byte:
dv.Set(reflect.ValueOf(cloneBytes(b)))
default:
dv.Set(sv)
}
return nil
}
if dv.Kind() == sv.Kind() && sv.Type().ConvertibleTo(dv.Type()) {
dv.Set(sv.Convert(dv.Type()))
return nil
}
// The following conversions use a string value as an intermediate representation
// to convert between various numeric types.
//
// This also allows scanning into user defined types such as "type Int int64".
// For symmetry, also check for string destination types.
switch dv.Kind() {
case reflect.Ptr:
if src == nil {
dv.Set(reflect.Zero(dv.Type()))
return nil
}
dv.Set(reflect.New(dv.Type().Elem()))
return convertAssign(dv.Interface(), src)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
s := asString(src)
i64, err := strconv.ParseInt(s, 10, dv.Type().Bits())
if err != nil {
err = strconvErr(err)
return fmt.Errorf("converting driver.Value type %T (%q) to a %s: %v", src, s, dv.Kind(), err)
}
dv.SetInt(i64)
return nil
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
s := asString(src)
u64, err := strconv.ParseUint(s, 10, dv.Type().Bits())
if err != nil {
err = strconvErr(err)
return fmt.Errorf("converting driver.Value type %T (%q) to a %s: %v", src, s, dv.Kind(), err)
}
dv.SetUint(u64)
return nil
case reflect.Float32, reflect.Float64:
s := asString(src)
f64, err := strconv.ParseFloat(s, dv.Type().Bits())
if err != nil {
err = strconvErr(err)
return fmt.Errorf("converting driver.Value type %T (%q) to a %s: %v", src, s, dv.Kind(), err)
}
dv.SetFloat(f64)
return nil
case reflect.String:
switch v := src.(type) {
case string:
dv.SetString(v)
return nil
case []byte:
dv.SetString(string(v))
return nil
}
}
return fmt.Errorf("unsupported Scan, storing driver.Value type %T into type %T", src, dest)
}
func strconvErr(err error) error {
if ne, ok := err.(*strconv.NumError); ok {
return ne.Err
}
return err
}
func cloneBytes(b []byte) []byte {
if b == nil {
return nil
}
c := make([]byte, len(b))
copy(c, b)
return c
}
func asString(src any) string {
switch v := src.(type) {
case string:
return v
case []byte:
return string(v)
}
rv := reflect.ValueOf(src)
switch rv.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return strconv.FormatInt(rv.Int(), 10)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
return strconv.FormatUint(rv.Uint(), 10)
case reflect.Float64:
return strconv.FormatFloat(rv.Float(), 'g', -1, 64)
case reflect.Float32:
return strconv.FormatFloat(rv.Float(), 'g', -1, 32)
case reflect.Bool:
return strconv.FormatBool(rv.Bool())
}
return fmt.Sprintf("%v", src)
}
func asBytes(buf []byte, rv reflect.Value) (b []byte, ok bool) {
switch rv.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return strconv.AppendInt(buf, rv.Int(), 10), true
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
return strconv.AppendUint(buf, rv.Uint(), 10), true
case reflect.Float32:
return strconv.AppendFloat(buf, rv.Float(), 'g', -1, 32), true
case reflect.Float64:
return strconv.AppendFloat(buf, rv.Float(), 'g', -1, 64), true
case reflect.Bool:
return strconv.AppendBool(buf, rv.Bool()), true
case reflect.String:
s := rv.String()
return append(buf, s...), true
}
return
}

134
vendor/github.com/mattn/go-sqlite3/doc.go generated vendored Normal file
View File

@@ -0,0 +1,134 @@
/*
Package sqlite3 provides an interface to SQLite3 databases.
This works as a driver for database/sql.
# Installation
go get github.com/mattn/go-sqlite3
# Supported Types
Currently, go-sqlite3 supports the following data types.
+------------------------------+
|go | sqlite3 |
|----------|-------------------|
|nil | null |
|int | integer |
|int64 | integer |
|float64 | float |
|bool | integer |
|[]byte | blob |
|string | text |
|time.Time | timestamp/datetime|
+------------------------------+
# SQLite3 Extension
You can write your own extension module for sqlite3. For example, below is an
extension for a Regexp matcher operation.
#include <pcre.h>
#include <string.h>
#include <stdio.h>
#include <sqlite3ext.h>
SQLITE_EXTENSION_INIT1
static void regexp_func(sqlite3_context *context, int argc, sqlite3_value **argv) {
if (argc >= 2) {
const char *target = (const char *)sqlite3_value_text(argv[1]);
const char *pattern = (const char *)sqlite3_value_text(argv[0]);
const char* errstr = NULL;
int erroff = 0;
int vec[500];
int n, rc;
pcre* re = pcre_compile(pattern, 0, &errstr, &erroff, NULL);
rc = pcre_exec(re, NULL, target, strlen(target), 0, 0, vec, 500);
if (rc <= 0) {
sqlite3_result_error(context, errstr, 0);
return;
}
sqlite3_result_int(context, 1);
}
}
#ifdef _WIN32
__declspec(dllexport)
#endif
int sqlite3_extension_init(sqlite3 *db, char **errmsg,
const sqlite3_api_routines *api) {
SQLITE_EXTENSION_INIT2(api);
return sqlite3_create_function(db, "regexp", 2, SQLITE_UTF8,
(void*)db, regexp_func, NULL, NULL);
}
It needs to be built as a .so/.dll shared library, and you need to register
the extension module as shown below.
sql.Register("sqlite3_with_extensions",
&sqlite3.SQLiteDriver{
Extensions: []string{
"sqlite3_mod_regexp",
},
})
Then, you can use this extension.
rows, err := db.Query("select text from mytable where name regexp '^golang'")
# Connection Hook
You can hook and inject your code when the connection is established by setting
ConnectHook to get the SQLiteConn.
sql.Register("sqlite3_with_hook_example",
&sqlite3.SQLiteDriver{
ConnectHook: func(conn *sqlite3.SQLiteConn) error {
sqlite3conn = append(sqlite3conn, conn)
return nil
},
})
You can also use database/sql.Conn.Raw (Go >= 1.13):
conn, err := db.Conn(context.Background())
// if err != nil { ... }
defer conn.Close()
err = conn.Raw(func (driverConn any) error {
sqliteConn := driverConn.(*sqlite3.SQLiteConn)
// ... use sqliteConn
})
// if err != nil { ... }
# Go SQLite3 Extensions
If you want to register Go functions as SQLite extension functions
you can make a custom driver by calling RegisterFunction from
ConnectHook.
regex = func(re, s string) (bool, error) {
return regexp.MatchString(re, s)
}
sql.Register("sqlite3_extended",
&sqlite3.SQLiteDriver{
ConnectHook: func(conn *sqlite3.SQLiteConn) error {
return conn.RegisterFunc("regexp", regex, true)
},
})
You can then use the custom driver by passing its name to sql.Open.
var i int
db, err := sql.Open("sqlite3_extended", "./foo.db")
if err != nil {
panic(err)
}
err = db.QueryRow(`SELECT regexp("foo.*", "seafood")`).Scan(&i)
if err != nil {
panic(err)
}
See the documentation of RegisterFunc for more details.
*/
package sqlite3

150
vendor/github.com/mattn/go-sqlite3/error.go generated vendored Normal file
View File

@@ -0,0 +1,150 @@
// Copyright (C) 2019 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package sqlite3
/*
#ifndef USE_LIBSQLITE3
#include "sqlite3-binding.h"
#else
#include <sqlite3.h>
#endif
*/
import "C"
import "syscall"
// ErrNo inherits errno.
type ErrNo int
// ErrNoMask is mask code.
const ErrNoMask C.int = 0xff
// ErrNoExtended is extended errno.
type ErrNoExtended int
// Error implements the sqlite error code.
type Error struct {
Code ErrNo /* The error code returned by SQLite */
ExtendedCode ErrNoExtended /* The extended error code returned by SQLite */
SystemErrno syscall.Errno /* The system errno returned by the OS through SQLite, if applicable */
err string /* The error string returned by sqlite3_errmsg(),
this usually contains more specific details. */
}
// result codes from http://www.sqlite.org/c3ref/c_abort.html
var (
ErrError = ErrNo(1) /* SQL error or missing database */
ErrInternal = ErrNo(2) /* Internal logic error in SQLite */
ErrPerm = ErrNo(3) /* Access permission denied */
ErrAbort = ErrNo(4) /* Callback routine requested an abort */
ErrBusy = ErrNo(5) /* The database file is locked */
ErrLocked = ErrNo(6) /* A table in the database is locked */
ErrNomem = ErrNo(7) /* A malloc() failed */
ErrReadonly = ErrNo(8) /* Attempt to write a readonly database */
ErrInterrupt = ErrNo(9) /* Operation terminated by sqlite3_interrupt() */
ErrIoErr = ErrNo(10) /* Some kind of disk I/O error occurred */
ErrCorrupt = ErrNo(11) /* The database disk image is malformed */
ErrNotFound = ErrNo(12) /* Unknown opcode in sqlite3_file_control() */
ErrFull = ErrNo(13) /* Insertion failed because database is full */
ErrCantOpen = ErrNo(14) /* Unable to open the database file */
ErrProtocol = ErrNo(15) /* Database lock protocol error */
ErrEmpty = ErrNo(16) /* Database is empty */
ErrSchema = ErrNo(17) /* The database schema changed */
ErrTooBig = ErrNo(18) /* String or BLOB exceeds size limit */
ErrConstraint = ErrNo(19) /* Abort due to constraint violation */
ErrMismatch = ErrNo(20) /* Data type mismatch */
ErrMisuse = ErrNo(21) /* Library used incorrectly */
ErrNoLFS = ErrNo(22) /* Uses OS features not supported on host */
ErrAuth = ErrNo(23) /* Authorization denied */
ErrFormat = ErrNo(24) /* Auxiliary database format error */
ErrRange = ErrNo(25) /* 2nd parameter to sqlite3_bind out of range */
ErrNotADB = ErrNo(26) /* File opened that is not a database file */
ErrNotice = ErrNo(27) /* Notifications from sqlite3_log() */
ErrWarning = ErrNo(28) /* Warnings from sqlite3_log() */
)
// Error returns the error message for the errno.
func (err ErrNo) Error() string {
return Error{Code: err}.Error()
}
// Extend returns the extended errno.
func (err ErrNo) Extend(by int) ErrNoExtended {
return ErrNoExtended(int(err) | (by << 8))
}
// Error returns the error message for the extended code.
func (err ErrNoExtended) Error() string {
return Error{Code: ErrNo(C.int(err) & ErrNoMask), ExtendedCode: err}.Error()
}
func (err Error) Error() string {
var str string
if err.err != "" {
str = err.err
} else {
str = C.GoString(C.sqlite3_errstr(C.int(err.Code)))
}
if err.SystemErrno != 0 {
str += ": " + err.SystemErrno.Error()
}
return str
}
// result codes from http://www.sqlite.org/c3ref/c_abort_rollback.html
var (
ErrIoErrRead = ErrIoErr.Extend(1)
ErrIoErrShortRead = ErrIoErr.Extend(2)
ErrIoErrWrite = ErrIoErr.Extend(3)
ErrIoErrFsync = ErrIoErr.Extend(4)
ErrIoErrDirFsync = ErrIoErr.Extend(5)
ErrIoErrTruncate = ErrIoErr.Extend(6)
ErrIoErrFstat = ErrIoErr.Extend(7)
ErrIoErrUnlock = ErrIoErr.Extend(8)
ErrIoErrRDlock = ErrIoErr.Extend(9)
ErrIoErrDelete = ErrIoErr.Extend(10)
ErrIoErrBlocked = ErrIoErr.Extend(11)
ErrIoErrNoMem = ErrIoErr.Extend(12)
ErrIoErrAccess = ErrIoErr.Extend(13)
ErrIoErrCheckReservedLock = ErrIoErr.Extend(14)
ErrIoErrLock = ErrIoErr.Extend(15)
ErrIoErrClose = ErrIoErr.Extend(16)
ErrIoErrDirClose = ErrIoErr.Extend(17)
ErrIoErrSHMOpen = ErrIoErr.Extend(18)
ErrIoErrSHMSize = ErrIoErr.Extend(19)
ErrIoErrSHMLock = ErrIoErr.Extend(20)
ErrIoErrSHMMap = ErrIoErr.Extend(21)
ErrIoErrSeek = ErrIoErr.Extend(22)
ErrIoErrDeleteNoent = ErrIoErr.Extend(23)
ErrIoErrMMap = ErrIoErr.Extend(24)
ErrIoErrGetTempPath = ErrIoErr.Extend(25)
ErrIoErrConvPath = ErrIoErr.Extend(26)
ErrLockedSharedCache = ErrLocked.Extend(1)
ErrBusyRecovery = ErrBusy.Extend(1)
ErrBusySnapshot = ErrBusy.Extend(2)
ErrCantOpenNoTempDir = ErrCantOpen.Extend(1)
ErrCantOpenIsDir = ErrCantOpen.Extend(2)
ErrCantOpenFullPath = ErrCantOpen.Extend(3)
ErrCantOpenConvPath = ErrCantOpen.Extend(4)
ErrCorruptVTab = ErrCorrupt.Extend(1)
ErrReadonlyRecovery = ErrReadonly.Extend(1)
ErrReadonlyCantLock = ErrReadonly.Extend(2)
ErrReadonlyRollback = ErrReadonly.Extend(3)
ErrReadonlyDbMoved = ErrReadonly.Extend(4)
ErrAbortRollback = ErrAbort.Extend(2)
ErrConstraintCheck = ErrConstraint.Extend(1)
ErrConstraintCommitHook = ErrConstraint.Extend(2)
ErrConstraintForeignKey = ErrConstraint.Extend(3)
ErrConstraintFunction = ErrConstraint.Extend(4)
ErrConstraintNotNull = ErrConstraint.Extend(5)
ErrConstraintPrimaryKey = ErrConstraint.Extend(6)
ErrConstraintTrigger = ErrConstraint.Extend(7)
ErrConstraintUnique = ErrConstraint.Extend(8)
ErrConstraintVTab = ErrConstraint.Extend(9)
ErrConstraintRowID = ErrConstraint.Extend(10)
ErrNoticeRecoverWAL = ErrNotice.Extend(1)
ErrNoticeRecoverRollback = ErrNotice.Extend(2)
ErrWarningAutoIndex = ErrWarning.Extend(1)
)

258039
vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c generated vendored Normal file

File diff suppressed because it is too large Load Diff

13526
vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h generated vendored Normal file

File diff suppressed because it is too large Load Diff

2284
vendor/github.com/mattn/go-sqlite3/sqlite3.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

103
vendor/github.com/mattn/go-sqlite3/sqlite3_context.go generated vendored Normal file
View File

@@ -0,0 +1,103 @@
// Copyright (C) 2019 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package sqlite3
/*
#ifndef USE_LIBSQLITE3
#include "sqlite3-binding.h"
#else
#include <sqlite3.h>
#endif
#include <stdlib.h>
// These wrappers are necessary because SQLITE_TRANSIENT
// is a pointer constant, and cgo doesn't translate them correctly.
static inline void my_result_text(sqlite3_context *ctx, char *p, int np) {
sqlite3_result_text(ctx, p, np, SQLITE_TRANSIENT);
}
static inline void my_result_blob(sqlite3_context *ctx, void *p, int np) {
sqlite3_result_blob(ctx, p, np, SQLITE_TRANSIENT);
}
*/
import "C"
import (
"math"
"reflect"
"unsafe"
)
const i64 = unsafe.Sizeof(int(0)) > 4
// SQLiteContext corresponds to sqlite3_context.
type SQLiteContext C.sqlite3_context
// ResultBool sets the result of an SQL function.
func (c *SQLiteContext) ResultBool(b bool) {
if b {
c.ResultInt(1)
} else {
c.ResultInt(0)
}
}
// ResultBlob sets the result of an SQL function.
// See: sqlite3_result_blob, http://sqlite.org/c3ref/result_blob.html
func (c *SQLiteContext) ResultBlob(b []byte) {
if i64 && len(b) > math.MaxInt32 {
C.sqlite3_result_error_toobig((*C.sqlite3_context)(c))
return
}
var p *byte
if len(b) > 0 {
p = &b[0]
}
C.my_result_blob((*C.sqlite3_context)(c), unsafe.Pointer(p), C.int(len(b)))
}
// ResultDouble sets the result of an SQL function.
// See: sqlite3_result_double, http://sqlite.org/c3ref/result_blob.html
func (c *SQLiteContext) ResultDouble(d float64) {
C.sqlite3_result_double((*C.sqlite3_context)(c), C.double(d))
}
// ResultInt sets the result of an SQL function.
// See: sqlite3_result_int, http://sqlite.org/c3ref/result_blob.html
func (c *SQLiteContext) ResultInt(i int) {
if i64 && (i > math.MaxInt32 || i < math.MinInt32) {
C.sqlite3_result_int64((*C.sqlite3_context)(c), C.sqlite3_int64(i))
} else {
C.sqlite3_result_int((*C.sqlite3_context)(c), C.int(i))
}
}
// ResultInt64 sets the result of an SQL function.
// See: sqlite3_result_int64, http://sqlite.org/c3ref/result_blob.html
func (c *SQLiteContext) ResultInt64(i int64) {
C.sqlite3_result_int64((*C.sqlite3_context)(c), C.sqlite3_int64(i))
}
// ResultNull sets the result of an SQL function.
// See: sqlite3_result_null, http://sqlite.org/c3ref/result_blob.html
func (c *SQLiteContext) ResultNull() {
C.sqlite3_result_null((*C.sqlite3_context)(c))
}
// ResultText sets the result of an SQL function.
// See: sqlite3_result_text, http://sqlite.org/c3ref/result_blob.html
func (c *SQLiteContext) ResultText(s string) {
h := (*reflect.StringHeader)(unsafe.Pointer(&s))
cs, l := (*C.char)(unsafe.Pointer(h.Data)), C.int(h.Len)
C.my_result_text((*C.sqlite3_context)(c), cs, l)
}
// ResultZeroblob sets the result of an SQL function.
// See: sqlite3_result_zeroblob, http://sqlite.org/c3ref/result_blob.html
func (c *SQLiteContext) ResultZeroblob(n int) {
C.sqlite3_result_zeroblob((*C.sqlite3_context)(c), C.int(n))
}

View File

@@ -0,0 +1,120 @@
// Copyright (C) 2018 G.J.R. Timmer <gjr.timmer@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package sqlite3
import (
"crypto/sha1"
"crypto/sha256"
"crypto/sha512"
)
// This file provides several different implementations for the
// default embedded sqlite_crypt function.
// This function uses a Caesar cipher by default
// and is used within the UserAuthentication module to encode
// the password.
//
// The provided functions can be used as an overload to the sqlite_crypt
// function through the use of the RegisterFunc on the connection.
//
// Because these functions can serve a purpose for an end-user
// even without the UserAuthentication module,
// they are compiled in by default.
//
// From SQLITE3 - user-auth.txt
// The sqlite_user.pw field is encoded by a built-in SQL function
// "sqlite_crypt(X,Y)". The two arguments are both BLOBs. The first argument
// is the plaintext password supplied to the sqlite3_user_authenticate()
// interface. The second argument is the sqlite_user.pw value and is supplied
// so that the function can extract the "salt" used by the password encoder.
// The result of sqlite_crypt(X,Y) is another blob which is the value that
// ends up being stored in sqlite_user.pw. To verify credentials X supplied
// by the sqlite3_user_authenticate() routine, SQLite runs:
//
// sqlite_user.pw == sqlite_crypt(X, sqlite_user.pw)
//
// To compute an appropriate sqlite_user.pw value from a new or modified
// password X, sqlite_crypt(X,NULL) is run. A new random salt is selected
// when the second argument is NULL.
//
// The built-in version of sqlite_crypt() uses a simple Caesar cipher
// which prevents passwords from being revealed by searching the raw database
// for ASCII text, but is otherwise trivially broken. For better password
// security, the database should be encrypted using the SQLite Encryption
// Extension or similar technology. Or, the application can use the
// sqlite3_create_function() interface to provide an alternative
// implementation of sqlite_crypt() that computes a stronger password hash,
// perhaps using a cryptographic hash function like SHA1.
// CryptEncoderSHA1 encodes a password with SHA1
func CryptEncoderSHA1(pass []byte, hash any) []byte {
h := sha1.Sum(pass)
return h[:]
}
// CryptEncoderSSHA1 encodes a password with SHA1 with the
// configured salt.
func CryptEncoderSSHA1(salt string) func(pass []byte, hash any) []byte {
return func(pass []byte, hash any) []byte {
s := []byte(salt)
p := append(pass, s...)
h := sha1.Sum(p)
return h[:]
}
}
// CryptEncoderSHA256 encodes a password with SHA256
func CryptEncoderSHA256(pass []byte, hash any) []byte {
h := sha256.Sum256(pass)
return h[:]
}
// CryptEncoderSSHA256 encodes a password with SHA256
// with the configured salt
func CryptEncoderSSHA256(salt string) func(pass []byte, hash any) []byte {
return func(pass []byte, hash any) []byte {
s := []byte(salt)
p := append(pass, s...)
h := sha256.Sum256(p)
return h[:]
}
}
// CryptEncoderSHA384 encodes a password with SHA384
func CryptEncoderSHA384(pass []byte, hash any) []byte {
h := sha512.Sum384(pass)
return h[:]
}
// CryptEncoderSSHA384 encodes a password with SHA384
// with the configured salt
func CryptEncoderSSHA384(salt string) func(pass []byte, hash any) []byte {
return func(pass []byte, hash any) []byte {
s := []byte(salt)
p := append(pass, s...)
h := sha512.Sum384(p)
return h[:]
}
}
// CryptEncoderSHA512 encodes a password with SHA512
func CryptEncoderSHA512(pass []byte, hash any) []byte {
h := sha512.Sum512(pass)
return h[:]
}
// CryptEncoderSSHA512 encodes a password with SHA512
// with the configured salt
func CryptEncoderSSHA512(salt string) func(pass []byte, hash any) []byte {
return func(pass []byte, hash any) []byte {
s := []byte(salt)
p := append(pass, s...)
h := sha512.Sum512(p)
return h[:]
}
}
// EOF
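As the comment block above describes, these encoders are meant to replace the built-in sqlite_crypt via RegisterFunc. A hedged sketch, assuming a binary built with the sqlite_userauth tag; the driver name and the salt "my-salt" are placeholders:

import (
	"database/sql"

	sqlite3 "github.com/mattn/go-sqlite3"
)

func init() {
	sql.Register("sqlite3_sha256_auth", &sqlite3.SQLiteDriver{
		ConnectHook: func(conn *sqlite3.SQLiteConn) error {
			// Swap the built-in Caesar-cipher sqlite_crypt for salted SHA-256.
			// "my-salt" is a placeholder; a real deployment must keep it stable,
			// or existing stored passwords will stop verifying.
			return conn.RegisterFunc("sqlite_crypt", sqlite3.CryptEncoderSSHA256("my-salt"), true)
		},
	})
}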

54
vendor/github.com/mattn/go-sqlite3/sqlite3_go18.go generated vendored Normal file

@@ -0,0 +1,54 @@
// Copyright (C) 2019 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
//go:build cgo && go1.8
// +build cgo,go1.8
package sqlite3
import (
"database/sql/driver"
"context"
)
// Ping implements driver.Pinger.
func (c *SQLiteConn) Ping(ctx context.Context) error {
if c.db == nil {
// must be ErrBadConn for sql to close the database
return driver.ErrBadConn
}
return nil
}
// QueryContext implements driver.QueryerContext.
func (c *SQLiteConn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) {
return c.query(ctx, query, args)
}
// ExecContext implements driver.ExecerContext.
func (c *SQLiteConn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) {
return c.exec(ctx, query, args)
}
// PrepareContext implements driver.ConnPrepareContext.
func (c *SQLiteConn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) {
return c.prepare(ctx, query)
}
// BeginTx implements driver.ConnBeginTx.
func (c *SQLiteConn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) {
return c.begin(ctx)
}
// QueryContext implements driver.StmtQueryContext.
func (s *SQLiteStmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) {
return s.query(ctx, args)
}
// ExecContext implements driver.StmtExecContext.
func (s *SQLiteStmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) {
return s.exec(ctx, args)
}
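These methods are what let cancellation and deadlines flow from database/sql down to the driver. A small sketch of the caller side — the streams table and its columns are hypothetical:

import (
	"context"
	"database/sql"
	"time"
)

func queryActive(db *sql.DB) error {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	// Routed through SQLiteConn.QueryContext above; if the deadline expires
	// while the query runs, the driver interrupts the statement.
	rows, err := db.QueryContext(ctx, `SELECT url FROM streams WHERE active = 1`)
	if err != nil {
		return err
	}
	defer rows.Close()
	for rows.Next() {
		var url string
		if err := rows.Scan(&url); err != nil {
			return err
		}
	}
	return rows.Err()
}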


@@ -0,0 +1,23 @@
// Copyright (C) 2019 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
//go:build libsqlite3
// +build libsqlite3
package sqlite3
/*
#cgo CFLAGS: -DUSE_LIBSQLITE3
#cgo linux LDFLAGS: -lsqlite3
#cgo darwin,amd64 LDFLAGS: -L/usr/local/opt/sqlite/lib -lsqlite3
#cgo darwin,amd64 CFLAGS: -I/usr/local/opt/sqlite/include
#cgo darwin,arm64 LDFLAGS: -L/opt/homebrew/opt/sqlite/lib -lsqlite3
#cgo darwin,arm64 CFLAGS: -I/opt/homebrew/opt/sqlite/include
#cgo openbsd LDFLAGS: -lsqlite3
#cgo solaris LDFLAGS: -lsqlite3
#cgo windows LDFLAGS: -lsqlite3
#cgo zos LDFLAGS: -lsqlite3
*/
import "C"


@@ -0,0 +1,85 @@
// Copyright (C) 2019 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
//go:build !sqlite_omit_load_extension
// +build !sqlite_omit_load_extension
package sqlite3
/*
#ifndef USE_LIBSQLITE3
#include "sqlite3-binding.h"
#else
#include <sqlite3.h>
#endif
#include <stdlib.h>
*/
import "C"
import (
"errors"
"unsafe"
)
func (c *SQLiteConn) loadExtensions(extensions []string) error {
rv := C.sqlite3_enable_load_extension(c.db, 1)
if rv != C.SQLITE_OK {
return errors.New(C.GoString(C.sqlite3_errmsg(c.db)))
}
for _, extension := range extensions {
if err := c.loadExtension(extension, nil); err != nil {
C.sqlite3_enable_load_extension(c.db, 0)
return err
}
}
rv = C.sqlite3_enable_load_extension(c.db, 0)
if rv != C.SQLITE_OK {
return errors.New(C.GoString(C.sqlite3_errmsg(c.db)))
}
return nil
}
// LoadExtension loads an SQLite extension.
func (c *SQLiteConn) LoadExtension(lib string, entry string) error {
rv := C.sqlite3_enable_load_extension(c.db, 1)
if rv != C.SQLITE_OK {
return errors.New(C.GoString(C.sqlite3_errmsg(c.db)))
}
if err := c.loadExtension(lib, &entry); err != nil {
C.sqlite3_enable_load_extension(c.db, 0)
return err
}
rv = C.sqlite3_enable_load_extension(c.db, 0)
if rv != C.SQLITE_OK {
return errors.New(C.GoString(C.sqlite3_errmsg(c.db)))
}
return nil
}
func (c *SQLiteConn) loadExtension(lib string, entry *string) error {
clib := C.CString(lib)
defer C.free(unsafe.Pointer(clib))
var centry *C.char
if entry != nil {
centry = C.CString(*entry)
defer C.free(unsafe.Pointer(centry))
}
var errMsg *C.char
defer func() {
// Evaluate errMsg when the deferred call runs, not at defer time (when it
// is still nil), so a message allocated by sqlite3_load_extension is freed.
C.sqlite3_free(unsafe.Pointer(errMsg))
}()
rv := C.sqlite3_load_extension(c.db, clib, centry, &errMsg)
if rv != C.SQLITE_OK {
return errors.New(C.GoString(errMsg))
}
return nil
}
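Application code normally reaches loadExtensions through the driver's Extensions field rather than calling LoadExtension by hand. A sketch, where the extension path "./libspellfix" is a placeholder for a compiled loadable extension:

import (
	"database/sql"

	sqlite3 "github.com/mattn/go-sqlite3"
)

func openWithExtensions() (*sql.DB, error) {
	// Each entry below is handed to loadExtensions on every new connection.
	sql.Register("sqlite3_with_extensions", &sqlite3.SQLiteDriver{
		Extensions: []string{"./libspellfix"},
	})
	return sql.Open("sqlite3_with_extensions", ":memory:")
}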


@@ -0,0 +1,25 @@
// Copyright (C) 2019 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
//go:build sqlite_omit_load_extension
// +build sqlite_omit_load_extension
package sqlite3
/*
#cgo CFLAGS: -DSQLITE_OMIT_LOAD_EXTENSION
*/
import "C"
import (
"errors"
)
func (c *SQLiteConn) loadExtensions(extensions []string) error {
return errors.New("Extensions have been disabled for static builds")
}
func (c *SQLiteConn) LoadExtension(lib string, entry string) error {
return errors.New("Extensions have been disabled for static builds")
}


@@ -0,0 +1,16 @@
// Copyright (C) 2019 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
// Copyright (C) 2018 G.J.R. Timmer <gjr.timmer@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
//go:build sqlite_allow_uri_authority
// +build sqlite_allow_uri_authority
package sqlite3
/*
#cgo CFLAGS: -DSQLITE_ALLOW_URI_AUTHORITY
#cgo LDFLAGS: -lm
*/
import "C"


@@ -0,0 +1,16 @@
// Copyright (C) 2019 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
// Copyright (C) 2018 G.J.R. Timmer <gjr.timmer@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
//go:build !windows && sqlite_app_armor
// +build !windows,sqlite_app_armor
package sqlite3
/*
#cgo CFLAGS: -DSQLITE_ENABLE_API_ARMOR
#cgo LDFLAGS: -lm
*/
import "C"


@@ -0,0 +1,22 @@
//go:build sqlite_column_metadata
// +build sqlite_column_metadata
package sqlite3
/*
#ifndef USE_LIBSQLITE3
#cgo CFLAGS: -DSQLITE_ENABLE_COLUMN_METADATA
#include <sqlite3-binding.h>
#else
#include <sqlite3.h>
#endif
*/
import "C"
// ColumnTableName returns the table that is the origin of a particular result
// column in a SELECT statement.
//
// See https://www.sqlite.org/c3ref/column_database_name.html
func (s *SQLiteStmt) ColumnTableName(n int) string {
return C.GoString(C.sqlite3_column_table_name(s.s, C.int(n)))
}
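Reaching ColumnTableName from database/sql means dropping to the raw driver connection, since the method lives on the driver's statement type and only exists under the sqlite_column_metadata build tag. A sketch with an illustrative "recordings" table:

import (
	"context"
	"database/sql"
	"fmt"

	sqlite3 "github.com/mattn/go-sqlite3"
)

func originTable(db *sql.DB) error {
	conn, err := db.Conn(context.Background())
	if err != nil {
		return err
	}
	defer conn.Close()
	return conn.Raw(func(driverConn any) error {
		sc := driverConn.(*sqlite3.SQLiteConn)
		stmt, err := sc.Prepare(`SELECT id FROM recordings`)
		if err != nil {
			return err
		}
		defer stmt.Close()
		// Column 0 of this SELECT originates from the "recordings" table.
		fmt.Println(stmt.(*sqlite3.SQLiteStmt).ColumnTableName(0))
		return nil
	})
}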


@@ -0,0 +1,16 @@
// Copyright (C) 2019 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
// Copyright (C) 2018 G.J.R. Timmer <gjr.timmer@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
//go:build sqlite_foreign_keys
// +build sqlite_foreign_keys
package sqlite3
/*
#cgo CFLAGS: -DSQLITE_DEFAULT_FOREIGN_KEYS=1
#cgo LDFLAGS: -lm
*/
import "C"

15
vendor/github.com/mattn/go-sqlite3/sqlite3_opt_fts5.go generated vendored Normal file

@@ -0,0 +1,15 @@
// Copyright (C) 2019 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
//go:build sqlite_fts5 || fts5
// +build sqlite_fts5 fts5
package sqlite3
/*
#cgo CFLAGS: -DSQLITE_ENABLE_FTS5
#cgo LDFLAGS: -lm
*/
import "C"

20
vendor/github.com/mattn/go-sqlite3/sqlite3_opt_icu.go generated vendored Normal file

@@ -0,0 +1,20 @@
// Copyright (C) 2019 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
//go:build sqlite_icu || icu
// +build sqlite_icu icu
package sqlite3
/*
#cgo LDFLAGS: -licuuc -licui18n
#cgo CFLAGS: -DSQLITE_ENABLE_ICU
#cgo darwin,amd64 CFLAGS: -I/usr/local/opt/icu4c/include
#cgo darwin,amd64 LDFLAGS: -L/usr/local/opt/icu4c/lib
#cgo darwin,arm64 CFLAGS: -I/opt/homebrew/opt/icu4c/include
#cgo darwin,arm64 LDFLAGS: -L/opt/homebrew/opt/icu4c/lib
#cgo openbsd LDFLAGS: -lsqlite3
*/
import "C"


@@ -0,0 +1,16 @@
// Copyright (C) 2019 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
// Copyright (C) 2018 G.J.R. Timmer <gjr.timmer@gmail.com>.
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
//go:build sqlite_introspect
// +build sqlite_introspect
package sqlite3
/*
#cgo CFLAGS: -DSQLITE_INTROSPECTION_PRAGMAS
#cgo LDFLAGS: -lm
*/
import "C"

Some files were not shown because too many files have changed in this diff