project move

This commit is contained in:
Pavel Vorobyov
2019-09-24 11:04:48 +03:00
parent eb2a495406
commit 7e2dec0ef0
33 changed files with 5613 additions and 1 deletion

README.md

@@ -1,2 +1,101 @@
# xc
XC is a parallel executer written in Go. It offers a nice CLI with a number of handy commands to run tasks on servers in parallel, autocompletion, and a simple DSL to express host lists in a laconic way.
Start by running xc and entering the `help` command. The most useful commands are `exec`, `runscript`, `hostlist` and `distribute`. Feel free to use `help <command or topic>` to get more info on those.
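For instance, a quick interactive session might look like this (the group and file names are made up for illustration):
```
> hostlist %group1
> p_exec %group1 uptime
> distribute %group1 deploy.sh
```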
## Building
Xc is structured using Go modules, so building is as easy as typing `go build -o xc cmd/xc/main.go`
## DSL
In xc, hosts are combined into groups and may belong to a datacenter; groups may be combined into other groups, and any group may belong to a workgroup (you may think of workgroups as projects). This reflects the _inventoree_ storage structure.
A reference note: _inventoree_, previously known as _conductor_, was originally developed as an HTTP backend for one of the early versions of xc (called _executer_ in those days).
Every host list may be written as an expression of comma-separated tokens, where every token represents a host, a group of hosts, or a whole workgroup of groups of hosts.
`host1.example.com` is a single host.
`%group1` represents a group.
`*workgroup` represents a workgroup.
`@some_dc` may be postfixed to a token to filter the resulting hostlist by a datacenter.
`#tag1` may be postfixed to a token to filter the result by a given tag.
Any token may be excluded by prefixing it with the `-` symbol.
Some self-explanatory examples:
```
host1,host2 - simple host list containing 2 hosts
%group1 - a group of hosts taken from inventoree
%group1,host1 - all hosts from group1, plus host1
%group1,-host2 - all hosts from group1, excluding(!) host2
%group2@dc1 - all hosts from group2, located in datacenter dc1
*myworkgroup@dc2,-%group3,host5 - all hosts from wg "myworkgroup" excluding hosts from group3, plus host5
%group5#tag1 - all hosts from group5 tagged with tag1
```
## Backends
At the moment xc supports three backends to load hosts/groups data from.
### Ini file
The easiest one to start with is the "ini" backend. Configuration is as simple as these three lines:
```
[backend]
type = ini
filename = ~/xcdata.ini
```
The format of the ini-file itself is simple but flexible, as you can see in the following self-explanatory example:
```
[datacenters]
dc1
dc2
[workgroups]
workgroup1
[groups]
group1 wg=workgroup1 tags=tag1,tag2
group1.1 wg=workgroup1 parent=group1 tags=tag3,tag4
[hosts]
host1.example.com group=group1 dc=dc1
host2.example.com group=group1.1 dc=dc2
```
All the fields given in key=value format, i.e. groups/dcs/tags for hosts or workgroups/tags for groups, are optional.
### Conductor (Legacy Inventoree)
**Conductor** backend uses the legacy v1 API of Conductor/Inventoree 5.x-6.x. This API doesn't require authentication
and provides all the data xc may use via a single handler `/api/v1/open/executer_data`. It's easy to configure as follows:
```
[backend]
type = conductor
# url is the base url of the conductor/inventoree instance
url = http://c.inventoree.ru
# you can optionally restrict the data to specified work groups
work_groups = workgroup1,workgroup2
```
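For reference, the handler wraps everything xc needs into a single JSON document. A trimmed sketch of its shape (field sets follow the backend's cache structures; all values are placeholders):
```
{
  "data": {
    "datacenters": [{"_id": "...", "name": "dc1", "description": "", "parent_id": ""}],
    "work_groups": [{"_id": "...", "name": "workgroup1", "description": ""}],
    "groups": [{"_id": "...", "name": "group1", "tags": ["tag1"], "work_group_id": "...", "parent_ids": []}],
    "hosts": [{"_id": "...", "fqdn": "host1.example.com", "aliases": [], "tags": [], "group_id": "...", "datacenter_id": "..."}]
  }
}
```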
### Inventoree
**Inventoree** backend utilizes the modern inventoree API v2, supported since inventoree 7.0. There's no xc-specific handler though, so the data loading is performed in several steps. It's still faster than the conductor backend as it doesn't rely on internal inventoree recursive data fetching. The configuration is similar to **conductor**'s; however, it's mandatory to configure the `auth_token` option, as inventoree doesn't provide API handlers without authentication.
```
[backend]
type = inventoree
url = http://v7.inventoree.ru
work_groups = ...
auth_token = ...
```
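Every request the backend makes carries the token in the `X-Api-Auth-Token` header, so fetching datacenters, for example, is roughly equivalent to:
```
curl -H "X-Api-Auth-Token: <your_token>" \
  "http://v7.inventoree.ru/api/v2/datacenters/?_fields=_id,name,description,parent_id&_nopaging=true"
```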


@@ -0,0 +1,254 @@
package conductor
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"os"
"path"
"regexp"
"strings"
"time"
"github.com/viert/xc/config"
"github.com/viert/xc/store"
"github.com/viert/xc/term"
)
// New creates a new instance of Conductor backend
func New(cfg *config.XCConfig) (*Conductor, error) {
c := &Conductor{
cacheTTL: cfg.CacheTTL,
cacheDir: cfg.CacheDir,
hosts: make([]*store.Host, 0),
groups: make([]*store.Group, 0),
workgroups: make([]*store.WorkGroup, 0),
datacenters: make([]*store.Datacenter, 0),
}
options := cfg.BackendCfg.Options
// workgroups configuration
wgString, found := options["work_groups"]
if !found || wgString == "" {
c.workgroupNames = make([]string, 0)
} else {
splitExpr := regexp.MustCompile(`\s*,\s*`)
c.workgroupNames = splitExpr.Split(wgString, -1)
}
// url configuration
url, found := options["url"]
if !found {
return nil, fmt.Errorf("Inventoree backend URL is not configured")
}
c.url = url
return c, nil
}
// Hosts exported backend method
func (c *Conductor) Hosts() []*store.Host {
return c.hosts
}
// Groups exported backend method
func (c *Conductor) Groups() []*store.Group {
return c.groups
}
// WorkGroups exported backend method
func (c *Conductor) WorkGroups() []*store.WorkGroup {
return c.workgroups
}
// Datacenters exported backend method
func (c *Conductor) Datacenters() []*store.Datacenter {
return c.datacenters
}
// Reload forces reloading data from HTTP(S)
func (c *Conductor) Reload() error {
err := c.loadRemote()
if err != nil {
// trying to use cache
return c.loadLocal()
}
return nil
}
// Load tries to load data from cache unless it's expired
// In case of cache expiration or absence it triggers Reload()
func (c *Conductor) Load() error {
var err error
if c.cacheExpired() {
return c.Reload()
}
// trying to use cache
err = c.loadLocal()
if err != nil {
// if it failed, trying to get data from remote
return c.loadRemote()
}
return nil
}
func (c *Conductor) loadLocal() error {
data, err := ioutil.ReadFile(c.cacheFilename())
if err != nil {
return err
}
lc := new(cache)
err = json.Unmarshal(data, lc)
if err != nil {
return err
}
c.extractCache(lc)
term.Warnf("Hosts loaded from cache\n")
return nil
}
func (c *Conductor) cacheExpired() bool {
st, err := os.Stat(c.cacheFilename())
if err != nil {
// a missing or unreadable cache file is treated as expired
return true
}
modifiedAt := st.ModTime()
return modifiedAt.Add(c.cacheTTL).Before(time.Now())
}
func (c *Conductor) cacheFilename() string {
var wglist string
if len(c.workgroupNames) > 0 {
wglist = strings.Join(c.workgroupNames, "_")
} else {
wglist = "all"
}
fn := fmt.Sprintf("cnd_cache_%s.json", wglist)
return path.Join(c.cacheDir, fn)
}
func (c *Conductor) saveCache(lc *cache) error {
_, err := os.Stat(c.cacheDir)
if err != nil && os.IsNotExist(err) {
err = os.MkdirAll(c.cacheDir, 0755)
if err != nil {
return fmt.Errorf("Error creating cache dir: %s", err)
}
}
f, err := os.Create(c.cacheFilename())
if err != nil {
return err
}
defer f.Close()
data, err := json.Marshal(lc)
if err != nil {
return err
}
_, err = f.Write(data)
return err
}
func (c *Conductor) extractCache(lc *cache) {
c.datacenters = make([]*store.Datacenter, 0)
c.workgroups = make([]*store.WorkGroup, 0)
c.groups = make([]*store.Group, 0)
c.hosts = make([]*store.Host, 0)
for _, dc := range lc.Datacenters {
c.datacenters = append(c.datacenters, &store.Datacenter{
ID: dc.ID,
Name: dc.Name,
Description: dc.Description,
ParentID: dc.ParentID,
})
}
for _, wg := range lc.WorkGroups {
c.workgroups = append(c.workgroups, &store.WorkGroup{
ID: wg.ID,
Name: wg.Name,
Description: wg.Description,
})
}
for _, g := range lc.Groups {
group := &store.Group{
ID: g.ID,
Name: g.Name,
Description: g.Description,
Tags: g.Tags,
WorkGroupID: g.WorkGroupID,
}
if len(g.ParentIDs) > 0 {
group.ParentID = g.ParentIDs[0]
}
c.groups = append(c.groups, group)
}
for _, h := range lc.Hosts {
c.hosts = append(c.hosts, &store.Host{
ID: h.ID,
FQDN: h.FQDN,
Aliases: h.Aliases,
Tags: h.Tags,
GroupID: h.GroupID,
DatacenterID: h.DatacenterID,
})
}
}
func (c *Conductor) httpGet(path string) ([]byte, error) {
client := &http.Client{}
url := fmt.Sprintf("%s%s", c.url, path)
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return nil, err
}
resp, err := client.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode != 200 {
return nil, fmt.Errorf("Status code %d while fetching %s", resp.StatusCode, url)
}
return ioutil.ReadAll(resp.Body)
}
func (c *Conductor) loadRemote() error {
var data []byte
var err error
term.Warnf("Loading executer data...\n")
path := "/api/v1/open/executer_data"
if len(c.workgroupNames) > 0 {
wglist := strings.Join(c.workgroupNames, ",")
path += fmt.Sprintf("?work_groups=%s", wglist)
}
data, err = c.httpGet(path)
if err != nil {
return err
}
apiResponse := new(api)
err = json.Unmarshal(data, apiResponse)
if err != nil {
return err
}
lc := apiResponse.Data
err = c.saveCache(lc)
if err != nil {
term.Errorf("Error saving cacne: %s\n", err)
} else {
term.Successf("Cache saved to %s\n", c.cacheFilename())
}
c.extractCache(lc)
return nil
}


@@ -0,0 +1,62 @@
package conductor
import (
"time"
"github.com/viert/xc/store"
)
// Conductor is a backend based on the Inventoree legacy v1 API
type Conductor struct {
workgroupNames []string
cacheTTL time.Duration
cacheDir string
url string
hosts []*store.Host
groups []*store.Group
workgroups []*store.WorkGroup
datacenters []*store.Datacenter
}
type datacenter struct {
ID string `json:"_id"`
Name string `json:"name"`
Description string `json:"description"`
ParentID string `json:"parent_id"`
}
type workgroup struct {
ID string `json:"_id"`
Name string `json:"name"`
Description string `json:"description"`
}
type host struct {
ID string `json:"_id"`
FQDN string `json:"fqdn"`
Description string `json:"description"`
Tags []string `json:"tags"`
Aliases []string `json:"aliases"`
GroupID string `json:"group_id"`
DatacenterID string `json:"datacenter_id"`
}
type group struct {
ID string `json:"_id"`
Name string `json:"name"`
Description string `json:"description"`
Tags []string `json:"tags"`
WorkGroupID string `json:"work_group_id"`
ParentIDs []string `json:"parent_ids"`
}
type cache struct {
Datacenters []*datacenter `json:"datacenters"`
Groups []*group `json:"groups"`
WorkGroups []*workgroup `json:"work_groups"`
Hosts []*host `json:"hosts"`
}
type api struct {
Data *cache `json:"data"`
}


@@ -0,0 +1,368 @@
package inventoree
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"os"
"path"
"regexp"
"strings"
"time"
"github.com/viert/xc/config"
"github.com/viert/xc/store"
"github.com/viert/xc/term"
)
// New creates and configures the inventoree-based backend
func New(cfg *config.XCConfig) (*Inventoree, error) {
var workgroupNames []string
options := cfg.BackendCfg.Options
// workgroups configuration
wgString, found := options["work_groups"]
if !found || wgString == "" {
workgroupNames = make([]string, 0)
} else {
splitExpr := regexp.MustCompile(`\s*,\s*`)
workgroupNames = splitExpr.Split(wgString, -1)
}
// url configuration
url, found := options["url"]
if !found {
return nil, fmt.Errorf("Inventoree backend URL is not configured")
}
// auth configuration
authToken, found := options["auth_token"]
if !found {
return nil, fmt.Errorf("Inventoree auth_token option is missing")
}
return &Inventoree{
workgroupNames: workgroupNames,
cacheTTL: cfg.CacheTTL,
cacheDir: cfg.CacheDir,
url: url,
authToken: authToken,
}, nil
}
// Hosts exported backend method
func (i *Inventoree) Hosts() []*store.Host {
return i.hosts
}
// Groups exported backend method
func (i *Inventoree) Groups() []*store.Group {
return i.groups
}
// WorkGroups exported backend method
func (i *Inventoree) WorkGroups() []*store.WorkGroup {
return i.workgroups
}
// Datacenters exported backend method
func (i *Inventoree) Datacenters() []*store.Datacenter {
return i.datacenters
}
// Reload forces reloading data from HTTP(S)
func (i *Inventoree) Reload() error {
err := i.loadRemote()
if err != nil {
// trying to use cache
return i.loadLocal()
}
return nil
}
// Load tries to load data from cache unless it's expired
// In case of cache expiration or absence it triggers Reload()
func (i *Inventoree) Load() error {
var err error
if i.cacheExpired() {
return i.Reload()
}
// trying to use cache
err = i.loadLocal()
if err != nil {
// if it failed, trying to get data from remote
return i.loadRemote()
}
return nil
}
func (i *Inventoree) inventoreeGet(path string) ([]byte, error) {
client := &http.Client{}
url := fmt.Sprintf("%s%s", i.url, path)
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return nil, err
}
req.Header.Add("X-Api-Auth-Token", i.authToken)
resp, err := client.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode != 200 {
return nil, fmt.Errorf("Status code %d while fetching %s", resp.StatusCode, url)
}
return ioutil.ReadAll(resp.Body)
}
func (i *Inventoree) loadLocal() error {
data, err := ioutil.ReadFile(i.cacheFilename())
if err != nil {
return err
}
lc := new(cache)
err = json.Unmarshal(data, lc)
if err != nil {
return err
}
i.extractCache(lc)
term.Warnf("Hosts loaded from cache\n")
return nil
}
func (i *Inventoree) loadRemote() error {
var data []byte
var count int
var err error
lc := new(cache)
lc.Datacenters = make([]*datacenter, 0)
lc.Groups = make([]*group, 0)
lc.WorkGroups = make([]*workgroup, 0)
lc.Hosts = make([]*host, 0)
term.Warnf("Loading datacenters...")
data, err = i.inventoreeGet("/api/v2/datacenters/?_fields=_id,name,description,parent_id&_nopaging=true")
if err != nil {
return err
}
dcdata := &apiDatacenters{}
err = json.Unmarshal(data, dcdata)
if err != nil {
return err
}
count = 0
for _, dc := range dcdata.Data {
lc.Datacenters = append(lc.Datacenters, dc)
count++
}
term.Warnf("%d loaded\n", count)
term.Warnf("Loading workgroups...")
count = 0
if len(i.workgroupNames) > 0 {
for _, wgname := range i.workgroupNames {
term.Warnf("%s..", wgname)
path := fmt.Sprintf("/api/v2/work_groups/%s?_fields=_id,name,description", wgname)
data, err = i.inventoreeGet(path)
if err != nil {
term.Errorf("\nError loading workgroup %s: %s\n", wgname, err)
continue
}
wgdata := &apiWorkgroup{}
err = json.Unmarshal(data, wgdata)
if err != nil {
term.Errorf("\nError loading workgroup %s: %s\n", wgname, err)
continue
}
lc.WorkGroups = append(lc.WorkGroups, wgdata.Data)
count++
}
} else {
path := fmt.Sprintf("/api/v2/work_groups/?_fields=_id,name,description&_nopaging=true")
data, err = i.inventoreeGet(path)
if err == nil {
wgdata := &apiWorkgroupList{}
err = json.Unmarshal(data, wgdata)
if err == nil {
for _, wg := range wgdata.Data {
lc.WorkGroups = append(lc.WorkGroups, wg)
count++
}
}
}
if err != nil {
term.Errorf("\nError loading workgroups: %s\n", err)
}
}
term.Warnf("%d loaded\n", count)
term.Warnf("Loading groups...")
count = 0
if len(i.workgroupNames) > 0 {
for _, wgname := range i.workgroupNames {
path := fmt.Sprintf("/api/v2/groups/?work_group_id=%s&_fields=_id,name,parent_id,local_tags,description,work_group_id&_nopaging=true", wgname)
data, err = i.inventoreeGet(path)
if err != nil {
term.Errorf("%s..", wgname)
continue
}
gdata := &apiGroups{}
err = json.Unmarshal(data, gdata)
if err != nil {
return err
}
for _, g := range gdata.Data {
lc.Groups = append(lc.Groups, g)
count++
}
term.Warnf("%s..", wgname)
}
} else {
path := "/api/v2/groups/?_fields=_id,name,parent_id,local_tags,description,work_group_id&_nopaging=true"
data, err = i.inventoreeGet(path)
if err != nil {
return err
}
gdata := &apiGroups{}
err = json.Unmarshal(data, gdata)
if err != nil {
return err
}
for _, g := range gdata.Data {
lc.Groups = append(lc.Groups, g)
count++
}
}
term.Warnf("%d loaded\n", count)
term.Warnf("Loading hosts...")
count = 0
for _, wg := range lc.WorkGroups {
path := fmt.Sprintf("/api/v2/hosts/?work_group_id=%s&_fields=_id,fqdn,local_tags,group_id,datacenter_id,aliases,description&_nopaging=true", wg.ID)
data, err = i.inventoreeGet(path)
if err != nil {
term.Errorf("\nError loading hosts of work group %s: %s", wg.Name, err)
continue
}
hdata := &apiHosts{}
err = json.Unmarshal(data, hdata)
if err != nil {
term.Errorf("\nError loading hosts of work group %s: %s", wg.Name, err)
continue
}
for _, h := range hdata.Data {
lc.Hosts = append(lc.Hosts, h)
count++
}
term.Warnf("%s..", wg.Name)
}
term.Warnf("%d loaded\n", count)
err = i.saveCache(lc)
if err != nil {
term.Errorf("Error saving cacne: %s\n", err)
} else {
term.Successf("Cache saved to %s\n", i.cacheFilename())
}
i.extractCache(lc)
return nil
}
func (i *Inventoree) cacheExpired() bool {
st, err := os.Stat(i.cacheFilename())
if err != nil {
// a missing or unreadable cache file is treated as expired
return true
}
modifiedAt := st.ModTime()
return modifiedAt.Add(i.cacheTTL).Before(time.Now())
}
func (i *Inventoree) cacheFilename() string {
var wglist string
if len(i.workgroupNames) > 0 {
wglist = strings.Join(i.workgroupNames, "_")
} else {
wglist = "all"
}
fn := fmt.Sprintf("inv_cache_%s.json", wglist)
return path.Join(i.cacheDir, fn)
}
func (i *Inventoree) saveCache(lc *cache) error {
_, err := os.Stat(i.cacheDir)
if err != nil && os.IsNotExist(err) {
err = os.MkdirAll(i.cacheDir, 0755)
if err != nil {
return fmt.Errorf("Error creating cache dir: %s", err)
}
}
f, err := os.Create(i.cacheFilename())
if err != nil {
return err
}
defer f.Close()
data, err := json.Marshal(lc)
if err != nil {
return err
}
_, err = f.Write(data)
return err
}
func (i *Inventoree) extractCache(lc *cache) {
i.datacenters = make([]*store.Datacenter, 0)
i.workgroups = make([]*store.WorkGroup, 0)
i.groups = make([]*store.Group, 0)
i.hosts = make([]*store.Host, 0)
for _, dc := range lc.Datacenters {
i.datacenters = append(i.datacenters, &store.Datacenter{
ID: dc.ID,
Name: dc.Name,
Description: dc.Description,
ParentID: dc.ParentID,
})
}
for _, wg := range lc.WorkGroups {
i.workgroups = append(i.workgroups, &store.WorkGroup{
ID: wg.ID,
Name: wg.Name,
Description: wg.Description,
})
}
for _, g := range lc.Groups {
i.groups = append(i.groups, &store.Group{
ID: g.ID,
Name: g.Name,
Description: g.Description,
ParentID: g.ParentID,
Tags: g.Tags,
WorkGroupID: g.WorkGroupID,
})
}
for _, h := range lc.Hosts {
i.hosts = append(i.hosts, &store.Host{
ID: h.ID,
FQDN: h.FQDN,
Aliases: h.Aliases,
Tags: h.Tags,
GroupID: h.GroupID,
DatacenterID: h.DatacenterID,
})
}
}


@@ -0,0 +1,79 @@
package inventoree
import (
"time"
"github.com/viert/xc/store"
)
// Inventoree is an inventoree backend based on the v2 API
type Inventoree struct {
workgroupNames []string
cacheTTL time.Duration
cacheDir string
url string
authToken string
hosts []*store.Host
groups []*store.Group
workgroups []*store.WorkGroup
datacenters []*store.Datacenter
}
type datacenter struct {
ID string `json:"_id"`
Name string `json:"name"`
Description string `json:"description"`
ParentID string `json:"parent_id"`
}
type workgroup struct {
ID string `json:"_id"`
Name string `json:"name"`
Description string `json:"description"`
}
type host struct {
ID string `json:"_id"`
FQDN string `json:"fqdn"`
Description string `json:"description"`
Tags []string `json:"local_tags"`
Aliases []string `json:"aliases"`
GroupID string `json:"group_id"`
DatacenterID string `json:"datacenter_id"`
}
type group struct {
ID string `json:"_id"`
Name string `json:"name"`
Description string `json:"description"`
Tags []string `json:"local_tags"`
WorkGroupID string `json:"work_group_id"`
ParentID string `json:"parent_id"`
}
type cache struct {
Datacenters []*datacenter `json:"datacenters"`
Groups []*group `json:"groups"`
WorkGroups []*workgroup `json:"work_groups"`
Hosts []*host `json:"hosts"`
}
type apiDatacenters struct {
Data []*datacenter `json:"data"`
}
type apiWorkgroup struct {
Data *workgroup `json:"data"`
}
type apiWorkgroupList struct {
Data []*workgroup `json:"data"`
}
type apiHosts struct {
Data []*host `json:"data"`
}
type apiGroups struct {
Data []*group `json:"data"`
}


@@ -0,0 +1,270 @@
package localini
import (
"bufio"
"fmt"
"os"
"strings"
"github.com/viert/xc/config"
"github.com/viert/xc/store"
)
type parseSection int
const (
sectionWorkgroups parseSection = iota
sectionDatacenters
sectionGroups
sectionHosts
sectionNone
)
var (
lineCount = 0
)
// LocalIni backend loads host data from an ini file
type LocalIni struct {
filename string
hosts []*store.Host
groups []*store.Group
workgroups []*store.WorkGroup
datacenters []*store.Datacenter
}
// New creates a new LocalIni backend
func New(cfg *config.XCConfig) (*LocalIni, error) {
filename, found := cfg.BackendCfg.Options["filename"]
if !found {
return nil, fmt.Errorf("localini backend filename option is missing")
}
return &LocalIni{filename: filename}, nil
}
// Hosts exported backend method
func (li *LocalIni) Hosts() []*store.Host {
return li.hosts
}
// Groups exported backend method
func (li *LocalIni) Groups() []*store.Group {
return li.groups
}
// WorkGroups exported backend method
func (li *LocalIni) WorkGroups() []*store.WorkGroup {
return li.workgroups
}
// Datacenters exported backend method
func (li *LocalIni) Datacenters() []*store.Datacenter {
return li.datacenters
}
// Load loads the data from file
func (li *LocalIni) Load() error {
f, err := os.Open(li.filename)
if err != nil {
return err
}
defer f.Close()
return li.read(f)
}
// Reload force reloads data from file
func (li *LocalIni) Reload() error {
return li.Load()
}
func (li *LocalIni) read(f *os.File) error {
var line string
li.datacenters = make([]*store.Datacenter, 0)
li.hosts = make([]*store.Host, 0)
li.groups = make([]*store.Group, 0)
li.workgroups = make([]*store.WorkGroup, 0)
lineCount = 0
section := sectionNone
scan := bufio.NewScanner(f)
for scan.Scan() {
lineCount++
line = strings.TrimSpace(scan.Text())
if line == "" || strings.HasPrefix(line, "#") {
continue
}
switch line {
case "[workgroups]":
section = sectionWorkgroups
case "[groups]":
section = sectionGroups
case "[datacenters]":
section = sectionDatacenters
case "[hosts]":
section = sectionHosts
default:
var err error
switch section {
case sectionNone:
err = fmt.Errorf("Unexpected line #%d outside sections: %s", lineCount, line)
case sectionWorkgroups:
err = li.addWorkgroup(line)
case sectionGroups:
err = li.addGroup(line)
case sectionDatacenters:
err = li.addDatacenter(line)
case sectionHosts:
err = li.addHost(line)
}
if err != nil {
return err
}
}
}
return nil
}
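// parseLine parses an ini entry of the form "name key=value key=value ...".
// The first token serves as both the id and the name of the entry; the
// remaining tokens are arbitrary key=value attributes validated by the caller.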
func parseLine(line string) (map[string]string, error) {
data := make(map[string]string)
tokens := strings.Split(line, " ")
if len(tokens) < 1 || tokens[0] == "" {
return nil, fmt.Errorf("Malformed line, can't read entry name at line %d: %s", lineCount, line)
}
data["id"] = tokens[0]
data["name"] = tokens[0]
tokens = tokens[1:]
for _, token := range tokens {
kv := strings.Split(token, "=")
if len(kv) != 2 {
return nil, fmt.Errorf("Invalid token \"%s\", expected key=value format at line %d", token, lineCount)
}
data[kv[0]] = kv[1]
}
return data, nil
}
func (li *LocalIni) addWorkgroup(line string) error {
data, err := parseLine(line)
if err != nil {
return err
}
wg := new(store.WorkGroup)
for key, value := range data {
switch key {
case "id":
wg.ID = value
case "name":
wg.Name = value
case "description":
wg.Description = value
default:
return fmt.Errorf("Invalid token %s at line %d: %s", key, lineCount, line)
}
}
li.workgroups = append(li.workgroups, wg)
return nil
}
func (li *LocalIni) addDatacenter(line string) error {
data, err := parseLine(line)
if err != nil {
return err
}
dc := new(store.Datacenter)
for key, value := range data {
switch key {
case "id":
dc.ID = value
case "name":
dc.Name = value
case "parent_id":
fallthrough
case "parent":
dc.ParentID = value
case "desc":
fallthrough
case "description":
dc.Description = value
default:
return fmt.Errorf("Invalid token %s at line %d: %s", key, lineCount, line)
}
}
li.datacenters = append(li.datacenters, dc)
return nil
}
func (li *LocalIni) addGroup(line string) error {
data, err := parseLine(line)
if err != nil {
return err
}
group := new(store.Group)
for key, value := range data {
switch key {
case "id":
group.ID = value
case "name":
group.Name = value
case "parent_id":
fallthrough
case "parent":
group.ParentID = value
case "tags":
group.Tags = strings.Split(value, ",")
case "description":
group.Description = value
case "workgroup":
fallthrough
case "wg":
fallthrough
case "wg_id":
group.WorkGroupID = value
default:
return fmt.Errorf("Invalid token %s at line %d: %s", key, lineCount, line)
}
}
li.groups = append(li.groups, group)
return nil
}
func (li *LocalIni) addHost(line string) error {
data, err := parseLine(line)
if err != nil {
return err
}
host := new(store.Host)
for key, value := range data {
switch key {
case "id":
host.ID = value
case "name":
host.FQDN = value
case "group_id":
fallthrough
case "group":
host.GroupID = value
case "aliases":
host.Aliases = strings.Split(value, ",")
case "tags":
host.Tags = strings.Split(value, ",")
case "description":
host.Description = value
case "datacenter":
fallthrough
case "datacenter_id":
fallthrough
case "dc":
fallthrough
case "dc_id":
host.DatacenterID = value
default:
return fmt.Errorf("Invalid token %s at line %d: %s", key, lineCount, line)
}
}
li.hosts = append(li.hosts, host)
return nil
}

cli/alias.go Normal file

@@ -0,0 +1,87 @@
package cli
import (
"fmt"
"strconv"
"github.com/viert/xc/term"
)
type alias struct {
name string
proxy string
}
func (c *Cli) createAlias(name []rune, proxy []rune) error {
al := &alias{string(name), string(proxy)}
if _, found := c.aliases[al.name]; !found {
for _, cmd := range c.completer.cmds {
if cmd == al.name {
return fmt.Errorf("Can't create alias \"%s\": such command already exists", al.name)
}
}
}
c.aliases[al.name] = al
c.handlers[al.name] = c.runAlias
c.completer.cmds = append(c.completer.cmds, al.name)
return nil
}
func (c *Cli) runAlias(name string, argsLine string, args ...string) {
c.aliasRecursionCount--
if c.aliasRecursionCount < 0 {
term.Errorf("Maximum recursion reached for alias referencing\n")
return
}
al, found := c.aliases[name]
if !found {
term.Errorf("Alias \"%s\" is defined but not found, this must be a bug\n", name)
return
}
cmdLine, err := exterpolate(al, argsLine, args...)
if err != nil {
term.Errorf("Error running alias \"%s\": %s\n", al.name, err)
return
}
c.OneCmd(cmdLine)
}
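// exterpolate expands placeholders in the alias proxy command: #N (a single
// digit) is replaced with the Nth argument given to the alias, and #* is
// replaced with the whole argument line. E.g. an alias defined as
// "p_exec #1 uptime" called with "%group1" expands to "p_exec %group1 uptime".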
func exterpolate(al *alias, argsLine string, args ...string) (string, error) {
res := ""
for i := 0; i < len(al.proxy); i++ {
// placeholders start with '#': #N expands to the Nth argument, #* to the whole args line
if i < len(al.proxy)-1 && al.proxy[i] == '#' {
next := al.proxy[i+1]
if next == '*' {
res += argsLine
i++
continue
}
an, err := strconv.ParseInt(string(next), 10, 64)
if err == nil {
argNum := int(an - 1)
if argNum >= len(args) {
return "", fmt.Errorf("alias \"%s\" needs argument #%d but only %d arguments are given", al.name, int(an), len(args))
}
res += args[argNum]
i++
continue
}
}
res += string(al.proxy[i])
}
return res, nil
}
func (c *Cli) removeAlias(name []rune) error {
sname := string(name)
_, found := c.aliases[sname]
if !found {
return fmt.Errorf("alias \"%s\" not found", sname)
}
delete(c.aliases, sname)
delete(c.handlers, sname)
c.completer.removeCommand(sname)
return nil
}

cli/cli.go Normal file

@@ -0,0 +1,411 @@
package cli
import (
"bufio"
"fmt"
"io"
"os"
"path/filepath"
"regexp"
"strings"
"time"
"github.com/chzyer/readline"
"github.com/viert/xc/config"
"github.com/viert/xc/log"
"github.com/viert/xc/remote"
"github.com/viert/xc/store"
"github.com/viert/xc/term"
)
type cmdHandler func(string, string, ...string)
type execMode int
// Cli is the command line interface object
type Cli struct {
rl *readline.Instance
stopped bool
handlers map[string]cmdHandler
aliases map[string]*alias
completer *completer
store *store.Store
mode execMode
user string
raiseType remote.RaiseType
raisePasswd string
remoteTmpDir string
delay int
sshThreads int
connectTimeout int
exitConfirm bool
execConfirm bool
prependHostnames bool
progressBar bool
debug bool
interpreter string
sudoInterpreter string
suInterpreter string
curDir string
outputFile *os.File
outputFileName string
aliasRecursionCount int
}
const (
emSerial execMode = iota
emParallel
emCollapse
maxAliasRecursion = 10
maxSSHThreadsSane = 1024
)
var (
whitespace = regexp.MustCompile(`\s+`)
modeMap = map[execMode]string{
emSerial: "serial",
emParallel: "parallel",
emCollapse: "collapse",
}
)
// New creates a new instance of CLI
func New(cfg *config.XCConfig, backend store.Backend) (*Cli, error) {
var err error
err = log.Initialize(cfg.LogFile)
if err != nil {
term.Errorf("Error initializing logger: %s\n", err)
}
cli := new(Cli)
st, err := store.CreateStore(backend)
if err != nil {
term.Errorf("Error initializing backend: %s\n", err)
return nil, err
}
cli.store = st
cli.stopped = false
cli.aliases = make(map[string]*alias)
cli.setupCmdHandlers()
cfg.Readline.AutoComplete = cli.completer
cli.rl, err = readline.NewEx(cfg.Readline)
if err != nil {
return nil, err
}
cli.exitConfirm = cfg.ExitConfirm
cli.execConfirm = cfg.ExecConfirm
cli.delay = cfg.Delay
cli.user = cfg.User
cli.sshThreads = cfg.SSHThreads
cli.prependHostnames = cfg.PrependHostnames
cli.progressBar = cfg.ProgressBar
cli.debug = cfg.Debug
cli.connectTimeout = cfg.SSHConnectTimeout
cli.remoteTmpDir = cfg.RemoteTmpdir
// output
cli.outputFileName = ""
cli.outputFile = nil
remote.Initialize(cli.sshThreads, cli.user)
remote.SetPrependHostnames(cli.prependHostnames)
remote.SetRemoteTmpdir(cfg.RemoteTmpdir)
remote.SetProgressBar(cli.progressBar)
remote.SetConnectTimeout(cli.connectTimeout)
remote.SetDebug(cli.debug)
// interpreter
cli.setInterpreter("none", cfg.Interpreter)
cli.setInterpreter("sudo", cfg.SudoInterpreter)
cli.setInterpreter("su", cfg.SuInterpreter)
cli.curDir, err = os.Getwd()
if err != nil {
term.Errorf("Error determining current directory: %s\n", err)
cli.curDir = "."
}
cli.doMode("mode", "mode", cfg.Mode)
cli.setPrompt()
return cli, nil
}
func (c *Cli) setPrompt() {
rts := ""
rtbold := false
rtcolor := term.CGreen
pr := fmt.Sprintf("[%s]", strings.Title(modeMap[c.mode]))
switch c.mode {
case emSerial:
if c.delay > 0 {
pr = fmt.Sprintf("[Serial:%d]", c.delay)
}
pr = term.Cyan(pr)
case emParallel:
pr = term.Yellow(pr)
case emCollapse:
pr = term.Green(pr)
}
pr += " " + term.Colored(c.user, term.CLightBlue, true)
switch c.raiseType {
case remote.RTSu:
rts = "(su"
rtcolor = term.CRed
case remote.RTSudo:
rts = "(sudo"
rtcolor = term.CGreen
default:
rts = ""
}
if rts != "" {
if c.raisePasswd == "" {
rts += "*"
rtbold = true
}
rts += ")"
pr += term.Colored(rts, rtcolor, rtbold)
}
pr += "> "
c.rl.SetPrompt(pr)
}
func (c *Cli) setInterpreter(iType string, interpreter string) {
switch iType {
case "none":
c.interpreter = interpreter
remote.SetInterpreter(interpreter)
case "sudo":
c.sudoInterpreter = interpreter
remote.SetSudoInterpreter(interpreter)
case "su":
c.suInterpreter = interpreter
remote.SetSuInterpreter(interpreter)
default:
term.Errorf("Invalid raise type: %s\n", iType)
}
term.Warnf("Using \"%s\" for commands with %s-type raise\n", interpreter, iType)
}
// Finalize closes resources at xc's exit. Must be called explicitly
func (c *Cli) Finalize() {
if c.outputFile != nil {
c.outputFile.Close()
c.outputFile = nil
}
}
// OneCmd is the main method which literally runs one command
// according to line given in arguments
func (c *Cli) OneCmd(line string) {
var args []string
var argsLine string
line = strings.Trim(line, " \n\t")
cmdRunes, rest := split([]rune(line))
cmd := string(cmdRunes)
if cmd == "" {
return
}
if rest == nil {
args = make([]string, 0)
argsLine = ""
} else {
argsLine = string(rest)
args = whitespace.Split(argsLine, -1)
}
if handler, ok := c.handlers[cmd]; ok {
handler(cmd, argsLine, args...)
} else {
term.Errorf("Unknown command: %s\n", cmd)
}
}
// CmdLoop reads commands and runs OneCmd
func (c *Cli) CmdLoop() {
for !c.stopped {
// Python cmd-style: re-run setPrompt every time in case something has changed
c.setPrompt()
line, err := c.rl.Readline()
if err == readline.ErrInterrupt {
continue
} else if err == io.EOF {
if !c.exitConfirm || c.confirm("Are you sure you want to exit?") {
c.stopped = true
}
continue
}
c.aliasRecursionCount = maxAliasRecursion
c.OneCmd(line)
}
}
func (c *Cli) confirm(msg string) bool {
reader := bufio.NewReader(os.Stdin)
for {
fmt.Printf("%s [Y/n] ", msg)
response, err := reader.ReadString('\n')
if err == nil {
response = strings.TrimSpace(strings.ToLower(response))
switch response {
case "":
fallthrough
case "y":
return true
case "n":
return false
}
}
fmt.Println()
}
}
func (c *Cli) acquirePasswd() {
if c.raiseType == remote.RTNone {
return
}
if c.raisePasswd == "" {
c.doPasswd("passwd", "")
}
}
func (c *Cli) setOutput(filename string) error {
var err error
f, err := os.OpenFile(filename, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
if err == nil {
if c.outputFile != nil {
c.outputFile.Close()
}
c.outputFile = f
remote.SetOutputFile(c.outputFile)
}
return err
}
func (c *Cli) doexec(mode execMode, argsLine string) {
var r *remote.ExecResult
expr, rest := split([]rune(argsLine))
if rest == nil {
term.Errorf("Usage: exec <inventoree_expr> commands...\n")
return
}
hosts, err := c.store.HostList(expr)
if err != nil {
term.Errorf("Error parsing expression %s: %s\n", string(expr), err)
return
}
if len(hosts) == 0 {
term.Errorf("Empty hostlist\n")
return
}
c.acquirePasswd()
cmd := string(rest)
remote.SetUser(c.user)
remote.SetRaise(c.raiseType)
remote.SetPassword(c.raisePasswd)
if c.execConfirm {
fmt.Printf("%s\n", term.Yellow(term.HR(len(cmd)+5)))
fmt.Printf("%s\n%s\n\n", term.Yellow("Hosts:"), strings.Join(hosts, ", "))
fmt.Printf("%s\n%s\n\n", term.Yellow("Command:"), cmd)
if !c.confirm("Are you sure?") {
return
}
fmt.Printf("%s\n\n", term.Yellow(term.HR(len(cmd)+5)))
}
remote.WriteOutput(fmt.Sprintf("==== exec %s\n", argsLine))
switch mode {
case emParallel:
r = remote.RunParallel(hosts, cmd)
case emCollapse:
r = remote.RunCollapse(hosts, cmd)
r.PrintOutputMap()
case emSerial:
r = remote.RunSerial(hosts, cmd, c.delay)
}
r.Print()
}
func (c *Cli) dorunscript(mode execMode, argsLine string) {
var (
r *remote.ExecResult
expr []rune
rest []rune
hosts []string
localFilename string
remoteFilename string
err error
st os.FileInfo
)
expr, rest = split([]rune(argsLine))
if rest == nil {
term.Errorf("Usage: runscript <inventoree_expr> filename\n")
return
}
hosts, err = c.store.HostList(expr)
if err != nil {
term.Errorf("Error parsing expression %s: %s\n", string(expr), err)
return
}
if len(hosts) == 0 {
term.Errorf("Empty hostlist\n")
return
}
c.acquirePasswd()
localFilename = string(rest)
st, err = os.Stat(localFilename)
if err != nil {
term.Errorf("Error stat %s: %s\n", localFilename, err)
return
}
if st.IsDir() {
term.Errorf("%s is a directory\n", localFilename)
return
}
now := time.Now().Format("20060102-150405")
remoteFilename = fmt.Sprintf("tmp.xc.%s_%s", now, filepath.Base(localFilename))
remoteFilename = filepath.Join(c.remoteTmpDir, remoteFilename)
dr := remote.Distribute(hosts, localFilename, remoteFilename, false)
copyError := dr.ErrorHosts
hosts = dr.SuccessHosts
cmd := fmt.Sprintf("%s; rm %s", remoteFilename, remoteFilename)
switch mode {
case emParallel:
r = remote.RunParallel(hosts, cmd)
case emCollapse:
r = remote.RunCollapse(hosts, cmd)
r.PrintOutputMap()
case emSerial:
r = remote.RunSerial(hosts, cmd, c.delay)
}
r.ErrorHosts = append(r.ErrorHosts, copyError...)
r.Print()
}

cli/completer.go Normal file

@@ -0,0 +1,237 @@
package cli
import (
"os"
"path/filepath"
"sort"
"strings"
"github.com/viert/xc/store"
"github.com/viert/xc/stringslice"
)
type completeFunc func([]rune) ([][]rune, int)
type completer struct {
cmds []string
handlers map[string]completeFunc
store *store.Store
}
func newCompleter(store *store.Store, commands []string) *completer {
x := &completer{commands, make(map[string]completeFunc), store}
x.handlers["mode"] = staticCompleter([]string{"collapse", "serial", "parallel"})
x.handlers["debug"] = onOffCompleter()
x.handlers["progressbar"] = onOffCompleter()
x.handlers["prepend_hostnames"] = onOffCompleter()
x.handlers["raise"] = staticCompleter([]string{"none", "su", "sudo"})
x.handlers["interpreter"] = staticCompleter([]string{"none", "su", "sudo"})
x.handlers["exec"] = x.completeExec
x.handlers["s_exec"] = x.completeExec
x.handlers["c_exec"] = x.completeExec
x.handlers["p_exec"] = x.completeExec
x.handlers["ssh"] = x.completeExec
x.handlers["hostlist"] = x.completeExec
x.handlers["cd"] = completeFiles
x.handlers["output"] = completeFiles
x.handlers["distribute"] = x.completeDistribute
x.handlers["runscript"] = x.completeDistribute
x.handlers["s_runscript"] = x.completeDistribute
x.handlers["c_runscript"] = x.completeDistribute
x.handlers["p_runscript"] = x.completeDistribute
helpTopics := append(commands, "expressions", "config", "rcfiles")
x.handlers["help"] = staticCompleter(helpTopics)
return x
}
func split(line []rune) ([]rune, []rune) {
strline := string(line)
tokens := whitespace.Split(strline, 2)
if len(tokens) < 2 {
return []rune(tokens[0]), nil
}
return []rune(tokens[0]), []rune(tokens[1])
}
func runes(src []string) (dst [][]rune) {
dst = make([][]rune, len(src))
for i := 0; i < len(src); i++ {
dst[i] = []rune(src[i])
}
return
}
func runeIndex(line []rune, sym rune) int {
for i := 0; i < len(line); i++ {
if line[i] == sym {
return i
}
}
return -1
}
func staticCompleter(options []string) completeFunc {
sort.Strings(options)
return func(line []rune) ([][]rune, int) {
ll := len(line)
sr := make([]string, 0)
for _, option := range options {
if strings.HasPrefix(option, string(line)) {
sr = append(sr, option[ll:])
}
}
return runes(sr), ll
}
}
func onOffCompleter() completeFunc {
return staticCompleter([]string{"on", "off"})
}
func completeFiles(line []rune) ([][]rune, int) {
ll := len(line)
path := string(line)
files, err := filepath.Glob(path + "*")
if err != nil {
return [][]rune{}, len(line)
}
results := make([][]rune, len(files))
for i := 0; i < len(files); i++ {
filename := files[i]
if st, err := os.Stat(filename); err == nil {
if st.IsDir() {
filename += "/"
}
}
results[i] = []rune(filename[ll:])
}
return results, ll
}
func (x *completer) complete(line []rune) ([][]rune, int) {
cmd, args := split(line)
if args == nil {
return x.completeCommand(cmd)
}
if handler, found := x.handlers[string(cmd)]; found {
return handler(args)
}
return [][]rune{}, 0
}
func (x *completer) completeCommand(line []rune) ([][]rune, int) {
sr := make([]string, 0)
for _, cmd := range x.cmds {
if strings.HasPrefix(cmd, string(line)) {
sr = append(sr, cmd[len(line):]+" ")
}
}
sort.Strings(sr)
return runes(sr), len(line)
}
func (x *completer) completeDistribute(line []rune) ([][]rune, int) {
_, cmd := split(line)
if cmd == nil {
return x.completeExec(line)
}
return completeFiles(cmd)
}
func (x *completer) completeExec(line []rune) ([][]rune, int) {
_, shellCmd := split(line)
if shellCmd != nil {
return [][]rune{}, 0
}
// are we in complex pattern? look for comma
ci := runeIndex(line, ',')
if ci >= 0 {
return x.completeExec(line[ci+1:])
}
// we are exactly in the beginning of the last expression
if len(line) > 0 && line[0] == '-' {
// exclusion is excluded from completion
return x.completeExec(line[1:])
}
if len(line) > 0 && line[0] == '%' {
return x.completeGroup(line[1:])
}
if len(line) > 0 && line[0] == '*' {
return x.completeWorkGroup(line[1:])
}
if len(line) > 0 && line[0] == '#' {
return x.completeTag(line[1:])
}
return x.completeHost(line)
}
func (x *completer) completeWorkGroup(line []rune) ([][]rune, int) {
ai := runeIndex(line, '@')
if ai >= 0 {
return x.completeDatacenter(line[ai+1:])
}
ti := runeIndex(line, '#')
if ti >= 0 {
return x.completeTag(line[ti+1:])
}
workgroups := x.store.CompleteWorkGroup(string(line))
return runes(workgroups), len(line)
}
func (x *completer) completeGroup(line []rune) ([][]rune, int) {
ai := runeIndex(line, '@')
if ai >= 0 {
return x.completeDatacenter(line[ai+1:])
}
ti := runeIndex(line, '#')
if ti >= 0 {
return x.completeTag(line[ti+1:])
}
groups := x.store.CompleteGroup(string(line))
return runes(groups), len(line)
}
func (x *completer) completeDatacenter(line []rune) ([][]rune, int) {
datacenters := x.store.CompleteDatacenter(string(line))
return runes(datacenters), len(line)
}
func (x *completer) completeHost(line []rune) ([][]rune, int) {
hosts := x.store.CompleteHost(string(line))
return runes(hosts), len(line)
}
func (x *completer) completeTag(line []rune) ([][]rune, int) {
tags := x.store.CompleteTag(string(line))
return runes(tags), len(line)
}
func (x *completer) Do(line []rune, pos int) ([][]rune, int) {
postfix := line[pos:]
result, length := x.complete(line[:pos])
if len(postfix) > 0 {
for i := 0; i < len(result); i++ {
result[i] = append(result[i], postfix...)
}
}
return result, length
}
func (x *completer) removeCommand(name string) {
idx := stringslice.Index(x.cmds, name)
if idx < 0 {
return
}
x.cmds = append(x.cmds[:idx], x.cmds[idx+1:]...)
}

cli/handlers.go Normal file

@@ -0,0 +1,500 @@
package cli
import (
"fmt"
"os"
"os/exec"
"os/signal"
"strconv"
"syscall"
"github.com/viert/xc/remote"
"github.com/viert/xc/term"
)
func (c *Cli) setupCmdHandlers() {
c.handlers = make(map[string]cmdHandler)
c.handlers["exit"] = c.doExit
c.handlers["mode"] = c.doMode
c.handlers["parallel"] = c.doParallel
c.handlers["collapse"] = c.doCollapse
c.handlers["serial"] = c.doSerial
c.handlers["user"] = c.doUser
c.handlers["hostlist"] = c.doHostlist
c.handlers["exec"] = c.doExec
c.handlers["s_exec"] = c.doSExec
c.handlers["c_exec"] = c.doCExec
c.handlers["p_exec"] = c.doPExec
c.handlers["ssh"] = c.doSSH
c.handlers["raise"] = c.doRaise
c.handlers["passwd"] = c.doPasswd
c.handlers["cd"] = c.doCD
c.handlers["local"] = c.doLocal
c.handlers["alias"] = c.doAlias
c.handlers["delay"] = c.doDelay
c.handlers["debug"] = c.doDebug
c.handlers["reload"] = c.doReload
c.handlers["interpreter"] = c.doInterpreter
c.handlers["connect_timeout"] = c.doConnectTimeout
c.handlers["progressbar"] = c.doProgressBar
c.handlers["prepend_hostnames"] = c.doPrependHostnames
c.handlers["help"] = c.doHelp
c.handlers["output"] = c.doOutput
c.handlers["threads"] = c.doThreads
c.handlers["distribute"] = c.doDistribute
c.handlers["runscript"] = c.doRunScript
c.handlers["s_runscript"] = c.doSRunScript
c.handlers["c_runscript"] = c.doCRunScript
c.handlers["p_runscript"] = c.doPRunScript
commands := make([]string, len(c.handlers))
i := 0
for cmd := range c.handlers {
commands[i] = cmd
i++
}
c.completer = newCompleter(c.store, commands)
}
func (c *Cli) doExit(name string, argsLine string, args ...string) {
c.stopped = true
}
func (c *Cli) doMode(name string, argsLine string, args ...string) {
if len(args) < 1 {
term.Errorf("Usage: mode <[serial,parallel,collapse]>\n")
return
}
newMode := args[0]
for mode, modeStr := range modeMap {
if newMode == modeStr {
c.mode = mode
return
}
}
term.Errorf("Unknown mode: %s\n", newMode)
}
func (c *Cli) doCollapse(name string, argsLine string, args ...string) {
c.doMode("mode", "collapse", "collapse")
}
func (c *Cli) doParallel(name string, argsLine string, args ...string) {
c.doMode("mode", "parallel", "parallel")
}
func (c *Cli) doSerial(name string, argsLine string, args ...string) {
c.doMode("mode", "serial", "serial")
}
func (c *Cli) doUser(name string, argsLine string, args ...string) {
if len(args) < 1 {
term.Errorf("Usage: user <username>\n")
return
}
c.user = args[0]
}
func (c *Cli) doHostlist(name string, argsLine string, args ...string) {
if len(args) < 1 {
term.Errorf("Usage: hostlist <xc_expr>\n")
return
}
hosts, err := c.store.HostList([]rune(args[0]))
if err != nil {
term.Errorf("%s\n", err)
return
}
if len(hosts) == 0 {
term.Errorf("Empty hostlist\n")
return
}
maxHostnameLen := 0
for _, host := range hosts {
if len(host) > maxHostnameLen {
maxHostnameLen = len(host)
}
}
title := fmt.Sprintf(" Hostlist %s ", args[0])
hrlen := len(title)
if hrlen < maxHostnameLen+2 {
hrlen = maxHostnameLen + 2
}
hr := term.HR(hrlen)
fmt.Println(term.Green(hr))
fmt.Println(term.Green(title))
fmt.Println(term.Green(hr))
for _, host := range hosts {
fmt.Println(host)
}
term.Successf("Total: %d hosts\n", len(hosts))
}
func (c *Cli) doRaise(name string, argsLine string, args ...string) {
if len(args) < 1 {
term.Errorf("Usage: raise <su/sudo>\n")
return
}
currentRaiseType := c.raiseType
rt := args[0]
switch rt {
case "su":
c.raiseType = remote.RTSu
case "sudo":
c.raiseType = remote.RTSudo
case "none":
c.raiseType = remote.RTNone
default:
term.Errorf("Unknown raise type: %s\n", rt)
}
if c.raiseType != currentRaiseType {
// Drop passwd in case of changing raise type
c.raisePasswd = ""
}
}
func (c *Cli) doPasswd(name string, argsLine string, args ...string) {
passwd, err := c.rl.ReadPassword("Set su/sudo password: ")
if err != nil {
term.Errorf("%s\n", err)
return
}
c.raisePasswd = string(passwd)
}
func (c *Cli) doExec(name string, argsLine string, args ...string) {
c.doexec(c.mode, argsLine)
}
func (c *Cli) doCExec(name string, argsLine string, args ...string) {
c.doexec(emCollapse, argsLine)
}
func (c *Cli) doSExec(name string, argsLine string, args ...string) {
c.doexec(emSerial, argsLine)
}
func (c *Cli) doPExec(name string, argsLine string, args ...string) {
c.doexec(emParallel, argsLine)
}
func (c *Cli) doSSH(name string, argsLine string, args ...string) {
if len(args) < 1 {
term.Errorf("Usage: ssh <inventoree_expr>\n")
return
}
c.acquirePasswd()
expr, rest := split([]rune(argsLine))
hosts, err := c.store.HostList([]rune(expr))
if err != nil {
term.Errorf("Error parsing expression %s: %s\n", expr, err)
return
}
if len(hosts) == 0 {
term.Errorf("Empty hostlist\n")
return
}
remote.SetUser(c.user)
remote.SetPassword(c.raisePasswd)
remote.SetRaise(c.raiseType)
cmd := string(rest)
remote.RunSerial(hosts, cmd, 0)
}
func (c *Cli) doCD(name string, argsLine string, args ...string) {
if len(args) < 1 {
term.Errorf("Usage: cd <directory>\n")
return
}
err := os.Chdir(argsLine)
if err != nil {
term.Errorf("Error changing directory: %s\n", err)
}
}
func (c *Cli) doLocal(name string, argsLine string, args ...string) {
if len(args) < 1 {
term.Errorf("Usage: local <localcmd> [...args]\n")
return
}
// ignore keyboard interrupt signals
sigs := make(chan os.Signal, 1)
signal.Notify(sigs, syscall.SIGINT)
defer signal.Reset()
cmd := exec.Command("bash", "-c", fmt.Sprintf("%s", argsLine))
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
cmd.Stdin = os.Stdin
cmd.Run()
}
func (c *Cli) doAlias(name string, argsLine string, args ...string) {
aliasName, rest := split([]rune(argsLine))
if len(aliasName) == 0 {
term.Errorf("Usage: alias <alias_name> <command> [...args]\n")
return
}
if len(rest) == 0 {
err := c.removeAlias(aliasName)
if err != nil {
term.Errorf("Error removing alias \"%s\": %s\n", string(aliasName), err)
}
} else {
err := c.createAlias(aliasName, rest)
if err != nil {
term.Errorf("Error creating alias %s: %s\n", string(aliasName), err)
}
}
}
func (c *Cli) doDelay(name string, argsLine string, args ...string) {
if len(args) < 1 {
term.Errorf("Usage: delay <seconds>\n")
return
}
sec, err := strconv.ParseInt(args[0], 10, 8)
if err != nil {
term.Errorf("Invalid delay format: %s\n", err)
return
}
c.delay = int(sec)
}
func (c *Cli) doDebug(name string, argsLine string, args ...string) {
if len(args) < 1 {
value := "off"
if c.debug {
value = "on"
}
term.Warnf("Debug is %s\n", value)
return
}
switch args[0] {
case "on":
c.debug = true
case "off":
c.debug = false
default:
term.Errorf("Invalid debug value. Please use either \"on\" or \"off\"\n")
return
}
remote.SetDebug(c.debug)
}
func (c *Cli) doProgressBar(name string, argsLine string, args ...string) {
if len(args) < 1 {
value := "off"
if c.progressBar {
value = "on"
}
term.Warnf("Progressbar is %s\n", value)
return
}
switch args[0] {
case "on":
c.progressBar = true
case "off":
c.progressBar = false
default:
term.Errorf("Invalid progressbar value. Please use either \"on\" or \"off\"\n")
return
}
remote.SetProgressBar(c.progressBar)
}
func (c *Cli) doPrependHostnames(name string, argsLine string, args ...string) {
if len(args) < 1 {
value := "off"
if c.prependHostnames {
value = "on"
}
term.Warnf("prepend_hostnames is %s\n", value)
return
}
switch args[0] {
case "on":
c.prependHostnames = true
case "off":
c.prependHostnames = false
default:
term.Errorf("Invalid prepend_hostnames value. Please use either \"on\" or \"off\"\n")
return
}
remote.SetPrependHostnames(c.prependHostnames)
}
func (c *Cli) doReload(name string, argsLine string, args ...string) {
err := c.store.BackendReload()
if err != nil {
term.Errorf("Error reloading data from backend\n")
}
}
func (c *Cli) doInterpreter(name string, argsLine string, args ...string) {
if len(args) == 0 {
term.Warnf("Using \"%s\" for commands with none-type raise\n", c.interpreter)
term.Warnf("Using \"%s\" for commands with sudo-type raise\n", c.sudoInterpreter)
term.Warnf("Using \"%s\" for commands with su-type raise\n", c.suInterpreter)
return
}
iType, interpreter := split([]rune(argsLine))
c.setInterpreter(string(iType), string(interpreter))
}
func (c *Cli) doConnectTimeout(name string, argsLine string, args ...string) {
if len(args) < 1 {
term.Warnf("connect_timeout = %s\n", c.connectTimeout)
return
}
ct, err := strconv.ParseInt(args[0], 10, 64)
if err != nil {
term.Errorf("Error reading connect timeout value: %s\n", err)
return
}
c.connectTimeout = int(ct)
remote.SetConnectTimeout(c.connectTimeout)
}
func (c *Cli) doOutput(name string, argsLine string, args ...string) {
if len(args) == 0 {
if c.outputFile == nil {
term.Warnf("Output is switched off\n")
} else {
term.Successf("Output is copied to %s\n", c.outputFileName)
}
return
}
// special filename to switch off the output
if argsLine == "_" {
c.outputFileName = ""
if c.outputFile != nil {
c.outputFile.Close()
c.outputFile = nil
remote.SetOutputFile(nil)
}
term.Warnf("Output is switched off\n")
return
}
err := c.setOutput(argsLine)
if err == nil {
c.outputFileName = argsLine
term.Successf("Output is copied to %s\n", c.outputFileName)
} else {
term.Errorf("Error setting output file to %s: %s\n", argsLine, err)
}
}
func (c *Cli) doThreads(name string, argsLine string, args ...string) {
if len(args) == 0 {
term.Successf("Max SSH threads: %d\n", c.sshThreads)
return
}
threads, err := strconv.ParseInt(args[0], 10, 64)
if err != nil {
term.Errorf("Error setting max SSH threads value: %s\n", err)
return
}
if int(threads) == c.sshThreads {
term.Warnf("Max SSH threads value remains unchanged\n")
return
}
if threads < 1 {
term.Errorf("Max SSH threads can't be lower than 1\n")
return
}
if threads > maxSSHThreadsSane {
term.Errorf("Max SSH threads can't be higher than %d\n", maxSSHThreadsSane)
return
}
c.sshThreads = int(threads)
term.Successf("Max SSH threads set to %d\n", c.sshThreads)
remote.SetNumThreads(c.sshThreads)
term.Successf("Execution pool re-created\n")
}
func (c *Cli) doDistribute(name string, argsLine string, args ...string) {
var (
r *remote.ExecResult
expr []rune
rest []rune
lcl []rune
rmt []rune
hosts []string
localFilename string
remoteFilename string
err error
st os.FileInfo
)
expr, rest = split([]rune(argsLine))
if rest == nil {
term.Errorf("Usage: distribute <inventoree_expr> filename [remote_filename]\n")
return
}
hosts, err = c.store.HostList(expr)
if err != nil {
term.Errorf("Error parsing expression %s: %s\n", string(expr), err)
return
}
if len(hosts) == 0 {
term.Errorf("Empty hostlist\n")
return
}
lcl, rmt = split(rest)
localFilename = string(lcl)
if rmt == nil {
remoteFilename = localFilename
} else {
remoteFilename = string(rmt)
}
st, err = os.Stat(localFilename)
if err != nil {
term.Errorf("Error stat %s: %s\n", localFilename, err)
return
}
r = remote.Distribute(hosts, localFilename, remoteFilename, st.IsDir())
r.Print()
}
func (c *Cli) doRunScript(name string, argsLine string, args ...string) {
c.dorunscript(c.mode, argsLine)
}
func (c *Cli) doSRunScript(name string, argsLine string, args ...string) {
c.dorunscript(emSerial, argsLine)
}
func (c *Cli) doCRunScript(name string, argsLine string, args ...string) {
c.dorunscript(emCollapse, argsLine)
}
func (c *Cli) doPRunScript(name string, argsLine string, args ...string) {
c.dorunscript(emParallel, argsLine)
}

cli/help.go Normal file

@@ -0,0 +1,376 @@
package cli
import (
"fmt"
"strings"
"github.com/viert/xc/term"
)
type helpItem struct {
help string
usage string
isTopic bool
}
var (
execHelp = &helpItem{
usage: "<host_expression> <command>",
help: `Runs a command on a list of servers.
The list of hosts is represented by <host_expression> in its own syntax which can be learned
by using the "help expressions" command.
exec can proceed in 3 different modes: serial, parallel and collapse.
In ` + term.Colored("serial", term.CWhite, true) + ` mode the command will be called server by server sequentally. Between servers in list
xc will hold for a delay which can be set with command "delay".
In ` + term.Colored("parallel", term.CWhite, true) + ` mode the command will be executed simultaneously. All output will be prefixed by
host name which the output line belongs to. Output is (almost) non buffered so one can use
parallel mode to run "infinite" commands like "tail -f" which is handy for watching logs from
the whole cluster in real-time.
The ` + term.Colored("collapse", term.CWhite, true) + ` mode is a lot like the parallel mode however the whole output is hidden until
the execution is over. In this mode xc prints the result grouped by the output so the differences
between hosts become more obvious. Try running "exec %group cat /etc/redhat-release" on a big
group of hosts in collapse mode to see if they have the same version of OS for example.
While the execution mode can be switched with the "mode" command, there are a couple of shortcuts:
c_exec
p_exec
s_exec
which run exec in collapse, parallel or serial mode respectively, without switching
the execution mode`,
}
runScriptHelp = &helpItem{
usage: "<host_expression> <scriptname>",
help: `Runs a local script on a given list of hosts.
To learn more about <host_expression> type "help expressions".
runscript simply copies the script to every server in the list and then
runs it according to the current execution mode (type "help exec" to learn more
on execution modes), i.e. it can run in parallel or sequentially like exec does.
There are also shortcut aliases c_runscript, s_runscript and p_runscript for calling runscript
in a particular execution mode without permanently switching to it.`,
}
modeHelp = `Switches execution mode
To learn more about execution modes type "help exec".
Xc has shortcuts for switching modes: just type "parallel", "serial" or "collapse" and it will
switch the mode accordingly.`
helpStrings = map[string]*helpItem{
"alias": &helpItem{
usage: "<aliasname> <cmd> [<args>]",
help: `Creates a local alias. This is handy for longer commands which are often in use.
Example:
alias ls local ls - this will create a local alias "ls" which actually runs "local ls"
alias uptime p_exec #1 uptime - this creates a local alias "uptime" which runs "p_exec <ARG> uptime"
<ARG> will be taken from the alias command and put into p_exec command,
i.e. uptime %mygroup will run p_exec %mygroup uptime
Every alias created disappears after xc exits. To make an alias persistent put it into rcfile.
See "help rcfiles" for further info.`,
},
"cd": &helpItem{
usage: "<dir>",
help: "Changes working directory",
},
"config": &helpItem{
isTopic: true,
help: `Configuration file is located in ~/.xc.conf.
The first time xc starts it creates a default configuration file with all the settings set
to default values:
[main]
user =
mode = parallel
history_file = ~/.xc_history
cache_dir = ~/.xc_cache
rc_file = ~/.xcrc
raise = none
exit_confirm = true
backend_type = conductor
local_file = ~/.xc_hosts
[executer]
ssh_threads = 50
ssh_connect_timeout = 1
ping_count = 5
progress_bar = true
remote_tmpdir = /tmp
delay = 0
[inventoree]
url = http://c.inventoree.ru
work_groups =
Configuration is split to 3 sections: main, executer and inventoree.
main.user is the user which will be set on xc startup. If empty, the current system user is used.
main.mode is the execution mode which will be set on xc startup. See "help mode" for more info on execution modes.
main.history_file sets the history file
main.cache_dir sets the cache dir for data derived from inventoree
main.rc_file is the rcfile which will be executed on xc startup. See "help rcfiles" for more info.
main.raise is the raise mode which will be set on xc startup
main.exit_confirm is a boolean setting to enable or disable confirmation on exit
main.backend_type is the type of backend: currently conductor, localjson or localini
main.local_file is the path to a local json or ini file, used when backend_type is localjson or localini
executer.ssh_threads limits the number of simultaneously running ssh commands.
executer.ssh_connect_timeout sets the default ssh connect timeout. You can change it at any moment using connect_timeout command.
executer.ping_count is not implemented yet and does nothing
executer.progress_bar turns the progress bar on or off at xc startup
executer.remote_tmpdir is a temporary directory used on remote servers for various xc needs
executer.delay sets a delay in seconds between hosts when executing in serial mode. See "help delay" for more info
inventoree.url sets the url of the inventoree service
inventoree.work_groups is a comma-separated list of work groups to download from inventoree.
If empty, all work groups (i.e. all groups and all hosts as well) are downloaded without filtering, which
may cause startup delays`,
},
"rcfiles": &helpItem{
isTopic: true,
help: `The rcfile configured in .xc.conf is executed every time xc starts.
It may be useful for configuring aliases (as they are dropped when xc exits) and other options.
An rcfile is just a list of xc commands in a text file.`,
},
"debug": &helpItem{
usage: "<on/off>",
help: `An internal debug mode. May cause unexpected output. One shouldn't use it unless she knows what she's doing.`,
},
"delay": &helpItem{
usage: "<seconds>",
help: `Sets a delay in seconds between hosts when in serial mode. This is useful for soft restarts,
i.e. when you want to give a service some time to warm up before restarting it on the next host.`,
},
"distribute": &helpItem{
usage: "<host_expression> <filename>",
help: `Distributes a local file to a number of hosts listed in "host_expression" in parallel.
See "help expressions" for further info on <host_expression>.
Example: distribute %mygroup hello.txt`,
},
"expressions": &helpItem{
help: `A lot of commands in xc use host expressions with a certain syntax to represent a list of hosts.
Every expression is a comma-separated list of tokens, where token may be
- a single host,
- a single group,
- a single workgroup,
and every item may optionally be limited to a particular datacenter, a given tag,
or even be completely excluded from the list.
Some self-explanatory examples:
host1,host2 - simple host list containing 2 hosts
%group1 - a group of hosts taken from inventoree
%group1,host1 - all hosts from group1, plus host1
%group1,-host2 - all hosts from group1, excluding(!) host2
%group2@dc1 - all hosts from group2, located in datacenter dc1
*myworkgroup@dc2,-%group3,host5 - all hosts from wg "myworkgroup" excluding hosts from group3, plus host5
%group5#tag1 - all hosts from group5 tagged with tag1
You may combine any number of tokens, keeping in mind that they are resolved left to right, so exclusions
should almost always be on the right-hand side. For example, "-host1,host1" will end up with host1 in the list
despite being excluded previously.`,
isTopic: true,
},
"exec": execHelp,
"s_exec": execHelp,
"c_exec": execHelp,
"p_exec": execHelp,
"exit": &helpItem{
usage: "",
help: "Exits the xc program. You can also use Ctrl-D to quit xc.",
},
"help": &helpItem{
usage: "[<command>]",
help: "Shows help on various commands and topics",
},
"hostlist": &helpItem{
usage: "<host_expression>",
help: `Resolves the host expression and prints the resulting host list. To learn more about expressions,
use the "help expressions" command`,
},
"local": &helpItem{
usage: "<command>",
help: `Runs a local command.
For example, you may want to ping a host without leaving xc.
This can be done by typing "local ping 1.1.1.1". For frequently used commands you may want to create
aliases like so: alias ping local ping #*. This will create an alias "ping" so you won't have to type
"local" in front of "ping" anymore. To learn more about aliases type "help alias"`,
},
"mode": &helpItem{
usage: "<serial/parallel/collapse>",
help: modeHelp,
},
"parallel": &helpItem{
usage: "",
help: modeHelp,
},
"serial": &helpItem{
usage: "",
help: modeHelp,
},
"collapse": &helpItem{
usage: "",
help: modeHelp,
},
"prepend_hostnames": &helpItem{
usage: "<on/off>",
help: `Sets prepend hostnames mode on or off. When called without arguments, shows the current value.
This toggles whether hostnames are prepended to output lines in parallel mode.
Switching them off is useful for copy-pasting the results.`,
},
"passwd": &helpItem{
usage: "",
help: `Sets the password for raising privileges`,
},
"progressbar": &helpItem{
usage: "[<on/off>]",
help: `Sets the progressbar on or off. If no value is given, prints the current value.`,
},
"raise": &helpItem{
usage: "<none/sudo/su>",
help: `Sets the type of privilege raising used when running the "exec" command.
If the value is "none", no attempts to raise privileges will be made.`,
},
"reload": &helpItem{
usage: "",
help: `Reloads hosts and groups data from inventoree and rewrites the cache`,
},
"runscript": runScriptHelp,
"c_runscript": runScriptHelp,
"p_runscript": runScriptHelp,
"s_runscript": runScriptHelp,
"interpreter": &helpItem{
usage: "[raise_type interpreter]",
help: `When invoked without arguments, the command shows the current interpreter for each type
of privilege raising ("help raise" to learn more on that). You can redefine an interpreter
using this command as in the following examples:
interpreter su su -m
interpreter sudo sudo /bin/bash
interpreter none /bin/sh`,
},
"output": &helpItem{
usage: "[filename]",
help: `Copies the entire output of parallel(!) exec commands to a given logfile. To switch
the logging off, type "output _". When invoked without arguments, the output command prints
the current output filename (or a message saying that the output logging is switched off)
and exits.`,
},
"ssh": &helpItem{
usage: "<host_expression>",
help: `Starts an ssh session to hosts one by one, raising privileges if the raise type is not "none"
("help raise" to learn more) and hands control over to the user. When the user exits the session,
xc moves on to the next server.`,
},
"threads": &helpItem{
usage: "[num_threads]",
help: `Sets the max number of simultaneously running ssh threads to <num_threads>. When called
without arguments, prints the current value.`,
},
"user": &helpItem{
usage: "<username>",
help: `Sets the username for all the execution commands. This is used to get access to hosts via ssh/scp.`,
},
}
)
func (c *Cli) doHelp(name string, argsLine string, args ...string) {
if len(args) < 1 {
generalHelp()
return
}
if hs, found := helpStrings[args[0]]; found {
if hs.isTopic {
fmt.Printf("\nTopic: %s\n\n", term.Colored(args[0], term.CWhite, true))
} else {
fmt.Printf("\nCommand: %s %s\n\n", term.Colored(args[0], term.CWhite, true), hs.usage)
}
tokens := strings.Split(hs.help, "\n")
for _, token := range tokens {
fmt.Printf(" %s\n", token)
}
fmt.Println()
} else {
term.Errorf("There's no help on topic \"%s\"\n", args[0])
}
}
func generalHelp() {
fmt.Println(`
List of commands:
alias creates a local alias command
cd changes current working directory
collapse shortcut for "mode collapse"
debug one shouldn't use this
delay sets a delay between hosts in serial mode
distribute copies a file to a number of hosts in parallel
exec/c_exec/s_exec/p_exec executes a remote command on a number of hosts
exit exits xc
help shows help on various topics
hostlist resolves a host expression to a list of hosts
interpreter sets interpreter for each type of privileges raising
local starts a local command
mode switches between execution modes
parallel shortcut for "mode parallel"
passwd sets the password for privilege raising
progressbar controls progressbar
raise sets the privilege raise mode
reload reloads hosts and groups data from inventoree
runscript runs a local script on a number of remote hosts
serial shortcut for "mode serial"
ssh starts ssh sessions to a number of hosts sequentially
user sets current user
`)
}

78
cmd/xc/main.go Normal file

@@ -0,0 +1,78 @@
package main
import (
"os"
"path"
"strings"
"github.com/viert/xc/backend/conductor"
"github.com/viert/xc/backend/inventoree"
"github.com/viert/xc/backend/localini"
"github.com/viert/xc/cli"
"github.com/viert/xc/config"
"github.com/viert/xc/term"
)
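// main loads ~/.xc.conf, instantiates the backend selected by backend.type
// and either enters the interactive command loop or executes the single
// command passed via os.Args.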
func main() {
var tool *cli.Cli
var err error
cfgFilename := path.Join(os.Getenv("HOME"), ".xc.conf")
xccfg, err := config.Read(cfgFilename)
if err != nil {
term.Errorf("Error reading config: %s\n", err)
return
}
switch xccfg.BackendCfg.Type {
case config.BTInventoree:
be, err := inventoree.New(xccfg)
if err != nil {
term.Errorf("Error creating inventoree backend: %s\n", err)
return
}
tool, err = cli.New(xccfg, be)
if err != nil {
term.Errorf("%s\n", err)
return
}
case config.BTIni:
be, err := localini.New(xccfg)
if err != nil {
term.Errorf("Error creating local ini backend: %s\n", err)
return
}
tool, err = cli.New(xccfg, be)
if err != nil {
term.Errorf("%s\n", err)
return
}
case config.BTConductor:
be, err := conductor.New(xccfg)
if err != nil {
term.Errorf("Error creating conductor backend: %s\n", err)
return
}
tool, err = cli.New(xccfg, be)
if err != nil {
term.Errorf("%s\n", err)
return
}
default:
term.Errorf("Backend type %s is not implemented yet\n", xccfg.BackendCfg.TypeString)
return
}
defer tool.Finalize()
if len(os.Args) < 2 {
tool.CmdLoop()
} else {
cmd := strings.Join(os.Args[1:], " ")
tool.OneCmd(cmd)
}
}

314
config/config.go Normal file

@@ -0,0 +1,314 @@
package config
import (
"fmt"
"io/ioutil"
"os"
"strings"
"time"
"github.com/chzyer/readline"
"github.com/viert/properties"
)
const defaultConfigContents = `[main]
user =
mode = parallel
history_file = ~/.xc_history
cache_dir = ~/.xc_cache
cache_ttl = 336 # 24 * 7 * 2
rc_file = ~/.xcrc
log_file =
raise = none
exit_confirm = true
exec_confirm = true
[executer]
ssh_threads = 50
ssh_connect_timeout = 1
progress_bar = true
prepend_hostnames = true
remote_tmpdir = /tmp
delay = 0
interpreter = bash
interpreter_sudo = sudo bash
interpreter_su = su -
[backend]
type = conductor
url = http://c.inventoree.ru
work_groups =
`
// BackendType is a backend type enum
type BackendType int
// Backend types
const (
BTIni BackendType = iota
BTJSON
BTConductor
BTInventoree
)
// BackendConfig is a backend configuration struct
type BackendConfig struct {
Type BackendType
TypeString string
Options map[string]string
}
// XCConfig represents a configuration struct for XC
type XCConfig struct {
Readline *readline.Config
BackendCfg *BackendConfig
User string
SSHThreads int
SSHConnectTimeout int
PingCount int
RemoteTmpdir string
Mode string
RaiseType string
Delay int
RCfile string
CacheDir string
CacheTTL time.Duration
Debug bool
ProgressBar bool
PrependHostnames bool
LogFile string
ExitConfirm bool
ExecConfirm bool
SudoInterpreter string
SuInterpreter string
Interpreter string
}
const (
defaultHistoryFile = "~/.xc_history"
defaultCacheDir = "~/.xc_cache"
defaultRCfile = "~/.xcrc"
defaultCacheTTL = 24
defaultThreads = 50
defaultRemoteTmpDir = "/tmp"
defaultPingCount = 5
defaultDelay = 0
defaultMode = "parallel"
defaultRaiseType = "none"
defaultDebug = false
defaultProgressbar = true
defaultPrependHostnames = true
defaultSSHConnectTimeout = 1
defaultLogFile = ""
defaultExitConfirm = true
defaultExecConfirm = true
defaultInterpreter = "/bin/bash"
defaultSudoInterpreter = "sudo /bin/bash"
defaultSuInterpreter = "su -"
)
var (
defaultReadlineConfig = &readline.Config{
InterruptPrompt: "^C",
EOFPrompt: "exit",
HistorySearchFold: true,
}
defaultUser = os.Getenv("USER")
)
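// expandPath expands a leading "~/" to $HOME and resolves any environment variables in path.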
func expandPath(path string) string {
if strings.HasPrefix(path, "~/") {
path = "$HOME/" + path[2:]
}
return os.ExpandEnv(path)
}
// Read reads and parses a configuration file
func Read(filename string) (*XCConfig, error) {
return read(filename, false)
}
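// read parses the config file. If the file does not exist on the first pass,
// it is created with default contents and read is retried once; secondPass
// prevents infinite recursion.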
func read(filename string, secondPass bool) (*XCConfig, error) {
var props *properties.Properties
var err error
props, err = properties.Load(filename)
if err != nil {
if secondPass {
return nil, err
}
if os.IsNotExist(err) {
err = ioutil.WriteFile(filename, []byte(defaultConfigContents), 0644)
if err != nil {
return nil, err
}
}
return read(filename, true)
}
cfg := new(XCConfig)
cfg.Readline = defaultReadlineConfig
cfg.BackendCfg = &BackendConfig{Type: BTIni, Options: make(map[string]string)}
hf, err := props.GetString("main.history_file")
if err != nil {
hf = defaultHistoryFile
}
cfg.Readline.HistoryFile = expandPath(hf)
rcf, err := props.GetString("main.rc_file")
if err != nil {
rcf = defaultRCfile
}
cfg.RCfile = expandPath(rcf)
lf, err := props.GetString("main.log_file")
if err != nil {
lf = defaultLogFile
}
cfg.LogFile = expandPath(lf)
cttl, err := props.GetInt("main.cache_ttl")
if err != nil {
cttl = defaultCacheTTL
}
cfg.CacheTTL = time.Hour * time.Duration(cttl)
cd, err := props.GetString("main.cache_dir")
if err != nil {
cd = defaultCacheDir
}
cfg.CacheDir = expandPath(cd)
user, err := props.GetString("main.user")
if err != nil || user == "" {
user = defaultUser
}
cfg.User = user
threads, err := props.GetInt("executer.ssh_threads")
if err != nil {
threads = defaultThreads
}
cfg.SSHThreads = threads
ctimeout, err := props.GetInt("executer.ssh_connect_timeout")
if err != nil {
ctimeout = defaultSSHConnectTimeout
}
cfg.SSHConnectTimeout = ctimeout
delay, err := props.GetInt("executer.delay")
if err != nil {
delay = defaultDelay
}
cfg.Delay = delay
tmpdir, err := props.GetString("executer.remote_tmpdir")
if err != nil {
tmpdir = defaultRemoteTmpDir
}
cfg.RemoteTmpdir = tmpdir
pc, err := props.GetInt("executer.ping_count")
if err != nil {
pc = defaultPingCount
}
cfg.PingCount = pc
sdi, err := props.GetString("executer.interpreter_sudo")
if err != nil {
sdi = defaultSudoInterpreter
}
cfg.SudoInterpreter = sdi
si, err := props.GetString("executer.interpreter_su")
if err != nil {
si = defaultSuInterpreter
}
cfg.SuInterpreter = si
intrpr, err := props.GetString("executer.interpreter")
if err != nil {
intrpr = defaultInterpreter
}
cfg.Interpreter = intrpr
rt, err := props.GetString("main.raise")
if err != nil {
rt = defaultRaiseType
}
cfg.RaiseType = rt
mode, err := props.GetString("main.mode")
if err != nil {
mode = defaultMode
}
cfg.Mode = mode
dbg, err := props.GetBool("main.debug")
if err != nil {
dbg = defaultDebug
}
cfg.Debug = dbg
exitcnfrm, err := props.GetBool("main.exit_confirm")
if err != nil {
exitcnfrm = defaultExitConfirm
}
cfg.ExitConfirm = exitcnfrm
execcnfrm, err := props.GetBool("main.exec_confirm")
if err != nil {
execcnfrm = defaultExecConfirm
}
cfg.ExecConfirm = execcnfrm
pbar, err := props.GetBool("executer.progress_bar")
if err != nil {
pbar = defaultProgressbar
}
cfg.ProgressBar = pbar
phn, err := props.GetBool("executer.prepend_hostnames")
if err != nil {
phn = defaultPrependHostnames
}
cfg.PrependHostnames = phn
bkeys, err := props.Subkeys("backend")
if err != nil {
return nil, fmt.Errorf("Backend configuration error: %s", err)
}
typeFound := false
for _, key := range bkeys {
value, _ := props.GetString("backend." + key)
if key == "type" {
cfg.BackendCfg.TypeString = value
switch value {
case "ini":
cfg.BackendCfg.Type = BTIni
case "json":
cfg.BackendCfg.Type = BTJSON
case "conductor":
cfg.BackendCfg.Type = BTConductor
case "inventoree":
cfg.BackendCfg.Type = BTInventoree
default:
return nil, fmt.Errorf("Invalid backend type \"%s\"", value)
}
typeFound = true
} else {
cfg.BackendCfg.Options[key] = value
}
}
if !typeFound {
return nil, fmt.Errorf("Error configuring backend: backend type is not defined")
}
return cfg, err
}

15
go.mod Normal file

@@ -0,0 +1,15 @@
module github.com/viert/xc
require (
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e
github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3 // indirect
github.com/kr/pty v1.1.8
github.com/mattn/go-runewidth v0.0.4 // indirect
github.com/npat-efault/poller v2.0.0+incompatible
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7
github.com/viert/properties v0.0.0-20190120163359-e72631698e82
github.com/viert/sekwence v0.0.0-20190110111110-24bab1ce82a0
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392
golang.org/x/sys v0.0.0-20190924062700-2aa67d56cdd7 // indirect
gopkg.in/cheggaaa/pb.v1 v1.0.28
)

29
go.sum Normal file

@@ -0,0 +1,29 @@
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/creack/pty v1.1.7 h1:6pwm8kMQKCmgUg0ZHTm5+/YvRK0s3THD/28+T6/kk4A=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3 h1:zN2lZNZRflqFyxVaTIU61KNKQ9C0055u9CAfpmqUvo4=
github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3/go.mod h1:nPpo7qLxd6XL3hWJG/O60sR8ZKfMCiIoNap5GvD12KU=
github.com/kr/pty v1.1.8 h1:AkaSdXYQOWeaO3neb8EM634ahkXXe3jYbVh/F9lq+GI=
github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y=
github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/npat-efault/poller v2.0.0+incompatible h1:jtTdXWKgN5kDK41ts8hoY1rvTEi0K08MTB8/bRO9MqE=
github.com/npat-efault/poller v2.0.0+incompatible/go.mod h1:lni01B89P8PtVpwlAhdhK1niN5rPkDGGpGGgBJzpSgo=
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7 h1:lDH9UUVJtmYCjyT0CI4q8xvlXPxeZ0gYCVvWbmPlp88=
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
github.com/viert/properties v0.0.0-20190120163359-e72631698e82 h1:g8UhWyFPF/pLB8RODVUC/3Zeu8XGfmPShPj2gzFVGu8=
github.com/viert/properties v0.0.0-20190120163359-e72631698e82/go.mod h1:f8oD3Ns8EJsv2WPuvHvfJ1QybIPAI4tbbly/OK1Bjdo=
github.com/viert/sekwence v0.0.0-20190110111110-24bab1ce82a0 h1:jH3SJmLOQEMH8EIjbl2uV7rn1mxWlW/RRih09/+fdUU=
github.com/viert/sekwence v0.0.0-20190110111110-24bab1ce82a0/go.mod h1:zPZmp3wodzVxymq5GjioSuamAO/vVE2zh2iTsjTA3Z0=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392 h1:ACG4HJsFiNMf47Y4PeRoebLNy/2lXT9EtprMuTFWt1M=
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190924062700-2aa67d56cdd7 h1:9Vs0Vm0p/0tnWLBWn79aav6fpcxKjBZbd21Lhxzit4k=
golang.org/x/sys v0.0.0-20190924062700-2aa67d56cdd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
gopkg.in/cheggaaa/pb.v1 v1.0.28 h1:n1tBJnnK2r7g9OW2btFH91V92STTUevLXYFb8gy9EMk=
gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=

47
log/log.go Normal file

@@ -0,0 +1,47 @@
package log
import (
"io/ioutil"
"os"
logging "github.com/op/go-logging"
)
var (
logger = logging.MustGetLogger("xc")
logfile *os.File
initialized = false
// Debug proxy
Debug = logger.Debug
// Debugf proxy
Debugf = logger.Debugf
)
// Initialize logger
func Initialize(logFilename string) error {
if logFilename == "" {
setupNullLogger()
return nil
}
var err error
logfile, err = os.OpenFile(logFilename, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
setupNullLogger()
return err
}
backend := logging.NewLogBackend(logfile, "", 0)
format := logging.MustStringFormatter(
`[%{time:15:04:05.000}] %{message}`,
)
backendFormatter := logging.NewBackendFormatter(backend, format)
logging.SetBackend(backendFormatter)
logger.Debug("logger initialized")
initialized = true
return nil
}
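// setupNullLogger routes all log output to ioutil.Discard when no log file
// is configured or it cannot be opened.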
func setupNullLogger() {
backend := logging.NewLogBackend(ioutil.Discard, "", 0)
logging.SetBackend(backend)
}

79
remote/copy.go Normal file

@@ -0,0 +1,79 @@
package remote
import (
"os"
"os/exec"
"syscall"
"time"
"github.com/kr/pty"
"github.com/npat-efault/poller"
"github.com/viert/xc/log"
)
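// copy runs scp against task.Hostname inside a pty, streams the raw output
// as MTDebug messages and returns the scp exit code (or a custom error code
// on terminal errors or force stop).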
func (w *Worker) copy(task *Task) int {
var err error
var n int
cmd := createSCPCmd(task.Hostname, task.LocalFilename, task.RemoteFilename, task.RecursiveCopy)
cmd.Env = append(os.Environ(), environment...)
ptmx, err := pty.Start(cmd)
if err != nil {
return ErrTerminalError
}
defer ptmx.Close()
fd, err := poller.NewFD(int(ptmx.Fd()))
if err != nil {
return ErrTerminalError
}
defer fd.Close()
buf := make([]byte, bufferSize)
taskForceStopped := false
for {
if w.forceStopped() {
taskForceStopped = true
break
}
fd.SetReadDeadline(time.Now().Add(pollDeadline))
n, err = fd.Read(buf)
if err != nil {
if err != poller.ErrTimeout {
// EOF, done
break
} else {
continue
}
}
if n == 0 {
continue
}
w.data <- &Message{buf[:n], MTDebug, task.Hostname, 0}
buf = make([]byte, bufferSize)
}
exitCode := 0
if taskForceStopped {
cmd.Process.Kill()
exitCode = ErrForceStop
log.Debugf("WRK[%d]: Task on %s was force stopped", w.id, task.Hostname)
}
err = cmd.Wait()
if !taskForceStopped {
if err != nil {
if exitErr, ok := err.(*exec.ExitError); ok {
ws := exitErr.Sys().(syscall.WaitStatus)
exitCode = ws.ExitStatus()
} else {
// MacOS hack
exitCode = ErrMacOsExit
}
}
log.Debugf("WRK[%d]: Task on %s exit code is %d", w.id, task.Hostname, exitCode)
}
return exitCode
}

78
remote/distribute.go Normal file

@@ -0,0 +1,78 @@
package remote
import (
"os"
"os/signal"
"sync"
"syscall"
"github.com/viert/xc/log"
pb "gopkg.in/cheggaaa/pb.v1"
)
// Distribute distributes a given local file or directory to a number of hosts
func Distribute(hosts []string, localFilename string, remoteFilename string, recursive bool) *ExecResult {
var (
wg sync.WaitGroup
bar *pb.ProgressBar
sigs chan os.Signal
r *ExecResult
t *Task
running int
)
r = newExecResult()
running = len(hosts)
if currentProgressBar {
bar = pb.StartNew(running)
}
sigs = make(chan os.Signal, 1)
signal.Notify(sigs, syscall.SIGINT)
defer signal.Reset()
go func() {
for _, host := range hosts {
t = &Task{
Hostname: host,
LocalFilename: localFilename,
RemoteFilename: remoteFilename,
RecursiveCopy: recursive,
Cmd: "",
WG: &wg,
}
pool.AddTask(t)
}
wg.Wait()
}()
for running > 0 {
select {
case d := <-pool.Data:
switch d.Type {
case MTDebug:
if currentDebug {
log.Debugf("DATASTREAM @ %s\n%v\n[%v]", d.Hostname, d.Data, string(d.Data))
}
case MTCopyFinished:
running--
if currentProgressBar {
bar.Increment()
}
r.Codes[d.Hostname] = d.StatusCode
if d.StatusCode == 0 {
r.SuccessHosts = append(r.SuccessHosts, d.Hostname)
} else {
r.ErrorHosts = append(r.ErrorHosts, d.Hostname)
}
}
case <-sigs:
r.ForceStoppedHosts = pool.ForceStopAllTasks()
}
}
if currentProgressBar {
bar.Finish()
}
return r
}

238
remote/executer.go Normal file

@@ -0,0 +1,238 @@
package remote
import (
"bytes"
"fmt"
"os"
"os/signal"
"strings"
"sync"
"syscall"
"github.com/viert/xc/log"
"github.com/viert/xc/term"
pb "gopkg.in/cheggaaa/pb.v1"
)
const (
stdoutWriteRetry = 25
)
// ExecResult is a struct with execution results
type ExecResult struct {
Codes map[string]int
Outputs map[string][]string
SuccessHosts []string
ErrorHosts []string
ForceStoppedHosts int
}
func newExecResult() *ExecResult {
return &ExecResult{
Codes: make(map[string]int),
Outputs: make(map[string][]string),
SuccessHosts: make([]string, 0),
ErrorHosts: make([]string, 0),
ForceStoppedHosts: 0,
}
}
// Print prints ExecResults in a nice way
func (r *ExecResult) Print() {
msg := fmt.Sprintf(" Hosts processed: %d, success: %d, error: %d ",
len(r.SuccessHosts)+len(r.ErrorHosts), len(r.SuccessHosts), len(r.ErrorHosts))
h := term.HR(len(msg))
fmt.Println(term.Green(h))
fmt.Println(term.Green(msg))
fmt.Println(term.Green(h))
}
// PrintOutputMap prints collapsed-style output
func (r *ExecResult) PrintOutputMap() {
for output, hosts := range r.Outputs {
msg := fmt.Sprintf(" %d host(s): %s ", len(hosts), strings.Join(hosts, ","))
tableWidth := len(msg) + 2
termWidth := term.GetTerminalWidth()
if tableWidth > termWidth {
tableWidth = termWidth
}
fmt.Println(term.Blue(term.HR(tableWidth)))
fmt.Println(term.Blue(msg))
fmt.Println(term.Blue(term.HR(tableWidth)))
fmt.Println(output)
}
}
func enqueue(local string, remote string, hosts []string) {
// This function runs in a goroutine because the task queue is limited in size.
// If the number of hosts is greater than the queue capacity (dataQueueSize, i.e. 1024),
// this loop will block on reaching the limit until some tasks are
// processed and some space in the queue is released.
//
// Moving the loop into a goroutine avoids blocking on task generation.
var wg sync.WaitGroup
for _, host := range hosts {
// remoteFile should include hostname for the case we have
// a number of aliases pointing to one server. With the same
// remote filename the first task finished removes the file
// while other tasks on the same server try to remove it afterwards and fail
remoteFilename := fmt.Sprintf("%s.%s.sh", remote, host)
task := &Task{
Hostname: host,
LocalFilename: local,
RemoteFilename: remoteFilename,
Cmd: remoteFilename,
WG: &wg,
}
pool.AddTask(task)
}
wg.Wait()
}
// RunParallel runs cmd on hosts in parallel mode
func RunParallel(hosts []string, cmd string) *ExecResult {
r := newExecResult()
if len(hosts) == 0 {
return r
}
local, remote, err := prepareTempFiles(cmd)
if err != nil {
term.Errorf("Error creating temporary file: %s\n", err)
return r
}
defer os.Remove(local)
running := len(hosts)
copied := 0
sigs := make(chan os.Signal, 1)
signal.Notify(sigs, syscall.SIGINT)
defer signal.Reset()
go enqueue(local, remote, hosts)
for running > 0 {
select {
case d := <-pool.Data:
switch d.Type {
case MTData:
log.Debugf("MSG@%s[DATA](%d): %s", d.Hostname, d.StatusCode, string(d.Data))
if !bytes.HasSuffix(d.Data, []byte{'\n'}) {
d.Data = append(d.Data, '\n')
}
if currentPrependHostnames {
fmt.Printf("%s: ", term.Blue(d.Hostname))
}
fmt.Print(string(d.Data))
writeHostOutput(d.Hostname, d.Data)
case MTDebug:
if currentDebug {
log.Debugf("DATASTREAM @ %s\n%v\n[%v]", d.Hostname, d.Data, string(d.Data))
}
case MTCopyFinished:
log.Debugf("MSG@%s[COPYFIN](%d): %s", d.Hostname, d.StatusCode, string(d.Data))
if d.StatusCode == 0 {
copied++
}
case MTExecFinished:
log.Debugf("MSG@%s[EXECFIN](%d): %s", d.Hostname, d.StatusCode, string(d.Data))
r.Codes[d.Hostname] = d.StatusCode
if d.StatusCode == 0 {
r.SuccessHosts = append(r.SuccessHosts, d.Hostname)
} else {
r.ErrorHosts = append(r.ErrorHosts, d.Hostname)
}
running--
}
case <-sigs:
fmt.Println()
r.ForceStoppedHosts = pool.ForceStopAllTasks()
}
}
return r
}
// RunCollapse runs cmd on hosts in collapse mode
func RunCollapse(hosts []string, cmd string) *ExecResult {
var bar *pb.ProgressBar
r := newExecResult()
if len(hosts) == 0 {
return r
}
local, remote, err := prepareTempFiles(cmd)
if err != nil {
term.Errorf("Error creating temporary file: %s\n", err)
return r
}
defer os.Remove(local)
running := len(hosts)
copied := 0
outputs := make(map[string]string)
if currentProgressBar {
bar = pb.StartNew(running)
}
sigs := make(chan os.Signal, 1)
signal.Notify(sigs, syscall.SIGINT)
defer signal.Reset()
go enqueue(local, remote, hosts)
for running > 0 {
select {
case d := <-pool.Data:
switch d.Type {
case MTData:
outputs[d.Hostname] += string(d.Data)
logData := make([]byte, len(d.Data))
copy(logData, d.Data)
if !bytes.HasSuffix(d.Data, []byte{'\n'}) {
logData = append(logData, '\n')
}
writeHostOutput(d.Hostname, logData)
case MTDebug:
if currentDebug {
log.Debugf("DATASTREAM @ %s\n%v\n[%v]", d.Hostname, d.Data, string(d.Data))
}
case MTCopyFinished:
if d.StatusCode == 0 {
copied++
}
case MTExecFinished:
if currentProgressBar {
bar.Increment()
}
r.Codes[d.Hostname] = d.StatusCode
if d.StatusCode == 0 {
r.SuccessHosts = append(r.SuccessHosts, d.Hostname)
} else {
r.ErrorHosts = append(r.ErrorHosts, d.Hostname)
}
running--
}
case <-sigs:
fmt.Println()
r.ForceStoppedHosts = pool.ForceStopAllTasks()
}
}
if currentProgressBar {
bar.Finish()
}
for k, v := range outputs {
_, found := r.Outputs[v]
if !found {
r.Outputs[v] = make([]string, 0)
}
r.Outputs[v] = append(r.Outputs[v], k)
}
return r
}

91
remote/pool.go Normal file

@@ -0,0 +1,91 @@
package remote
import (
"github.com/viert/xc/log"
)
const (
dataQueueSize = 1024
)
// Pool is a class representing a worker pool
type Pool struct {
workers []*Worker
queue chan *Task
Data chan *Message
}
// NewPool creates a new worker pool of a given size
func NewPool(size int) *Pool {
p := &Pool{
workers: make([]*Worker, size),
queue: make(chan *Task, dataQueueSize),
Data: make(chan *Message, dataQueueSize),
}
for i := 0; i < size; i++ {
p.workers[i] = NewWorker(p.queue, p.Data)
}
log.Debugf("Remote execution pool created with %d workers", size)
log.Debugf("Data Queue Size is %d", dataQueueSize)
return p
}
// ForceStopAllTasks removes all pending tasks and force stops those in progress
func (p *Pool) ForceStopAllTasks() int {
// Remove all pending tasks from the queue
log.Debug("Force stopping all tasks")
i := 0
rmvLoop:
for {
select {
case <-p.queue:
i++
continue
default:
break rmvLoop
}
}
log.Debugf("%d queued (and not yet started) tasks removed from the queue", i)
stopped := 0
for _, wrk := range p.workers {
if wrk.ForceStop() {
log.Debugf("Worker %d was running a task so force stopped", wrk.ID())
stopped++
}
}
return stopped
}
// Close shuts down the pool itself and all its workers
func (p *Pool) Close() {
log.Debug("Closing remote execution pool")
p.ForceStopAllTasks()
close(p.queue) // this should make all the workers step out of range loop on queue chan and shut down
log.Debug("Closing the task queue")
close(p.Data)
}
// AddTask adds a task to the pool queue
func (p *Pool) AddTask(task *Task) {
if task.WG != nil {
task.WG.Add(1)
}
p.queue <- task
}
// AddTaskHostlist creates multiple tasks to be run on a multiple hosts
func (p *Pool) AddTaskHostlist(task *Task, hosts []string) {
for _, host := range hosts {
t := &Task{
Hostname: host,
LocalFilename: task.LocalFilename,
RemoteFilename: task.RemoteFilename,
Cmd: task.Cmd,
WG: task.WG,
}
p.AddTask(t)
}
}

136
remote/remote.go Normal file

@@ -0,0 +1,136 @@
package remote
import (
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"time"
)
var (
pool *Pool
currentUser string
currentPassword string
currentRaise RaiseType
currentProgressBar bool
currentPrependHostnames bool
currentRemoteTmpdir string
currentDebug bool
outputFile *os.File
noneInterpreter string
suInterpreter string
sudoInterpreter string
)
// Initialize initializes new execution pool
func Initialize(numThreads int, username string) {
pool = NewPool(numThreads)
SetUser(username)
SetPassword("")
SetRaise(RTNone)
}
// SetInterpreter sets none-raise interpreter
func SetInterpreter(interpreter string) {
noneInterpreter = interpreter
}
// SetSudoInterpreter sets sudo-raise interpreter
func SetSudoInterpreter(interpreter string) {
sudoInterpreter = interpreter
}
// SetSuInterpreter sets su-raise interpreter
func SetSuInterpreter(interpreter string) {
suInterpreter = interpreter
}
// SetUser sets executer username
func SetUser(username string) {
currentUser = username
}
// SetRaise sets executer raise type
func SetRaise(raise RaiseType) {
currentRaise = raise
}
// SetPassword sets executer password
func SetPassword(password string) {
currentPassword = password
}
// SetProgressBar sets current progressbar mode
func SetProgressBar(pbar bool) {
currentProgressBar = pbar
}
// SetRemoteTmpdir sets current remote temp directory
func SetRemoteTmpdir(tmpDir string) {
currentRemoteTmpdir = tmpDir
}
// SetDebug sets current debug mode
func SetDebug(debug bool) {
currentDebug = debug
}
// SetPrependHostnames sets current prepend_hostnames value for parallel mode
func SetPrependHostnames(prependHostnames bool) {
currentPrependHostnames = prependHostnames
}
// SetConnectTimeout sets the ssh connect timeout in sshOptions
func SetConnectTimeout(timeout int) {
sshOptions["ConnectTimeout"] = fmt.Sprintf("%d", timeout)
}
// SetOutputFile sets output file for every command.
// if it's nil, no output will be written to files
func SetOutputFile(f *os.File) {
outputFile = f
}
// SetNumThreads recreates the execution pool with the given number of threads
func SetNumThreads(numThreads int) {
if len(pool.workers) == numThreads {
return
}
pool.Close()
pool = NewPool(numThreads)
}
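// prepareTempFiles writes cmd into a temporary self-destroying shell script
// and returns its local path along with the remote path it should be copied to.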
func prepareTempFiles(cmd string) (string, string, error) {
f, err := ioutil.TempFile("", "xc.")
if err != nil {
return "", "", err
}
defer f.Close()
remoteFilename := filepath.Join(currentRemoteTmpdir, filepath.Base(f.Name()))
io.WriteString(f, "#!/bin/bash\n\n")
io.WriteString(f, "nohup bash -c \"sleep 1; rm -f $0\" >/dev/null 2>&1 </dev/null &\n") // self-destroy
io.WriteString(f, cmd+"\n") // run command
f.Chmod(0755)
return f.Name(), remoteFilename, nil
}
// WriteOutput writes output to a user-defined logfile
// prepending with the current datetime
func WriteOutput(message string) {
if outputFile == nil {
return
}
tm := time.Now().Format("2006-01-02 15:04:05")
message = fmt.Sprintf("[%s] %s", tm, message)
outputFile.Write([]byte(message))
}
func writeHostOutput(host string, data []byte) {
message := fmt.Sprintf("%s: %s", host, string(data))
WriteOutput(message)
}

129
remote/runcmd.go Normal file

@@ -0,0 +1,129 @@
package remote
import (
"bytes"
"os"
"os/exec"
"syscall"
"time"
"github.com/kr/pty"
"github.com/npat-efault/poller"
"github.com/viert/xc/log"
)
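// runcmd executes task.Cmd on task.Hostname via ssh in a pty. It answers
// the password prompt when raising privileges, filters ssh service messages
// and returns the remote exit code (or a custom error code).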
func (w *Worker) runcmd(task *Task) int {
var err error
var n int
var passwordSent bool
passwordSent = currentRaise == RTNone
cmd := createSSHCmd(task.Hostname, task.Cmd)
cmd.Env = append(os.Environ(), environment...)
ptmx, err := pty.Start(cmd)
if err != nil {
return ErrTerminalError
}
defer ptmx.Close()
fd, err := poller.NewFD(int(ptmx.Fd()))
if err != nil {
return ErrTerminalError
}
defer fd.Close()
buf := make([]byte, bufferSize)
taskForceStopped := false
shouldSkipEcho := false
msgCount := 0
execLoop:
for {
if w.forceStopped() {
taskForceStopped = true
break
}
fd.SetReadDeadline(time.Now().Add(pollDeadline))
n, err = fd.Read(buf)
if err != nil {
if err != poller.ErrTimeout {
// EOF, done
break
} else {
continue
}
}
if n == 0 {
continue
}
w.data <- &Message{buf[:n], MTDebug, task.Hostname, -1}
msgCount++
chunks := bytes.SplitAfter(buf[:n], []byte{'\n'})
for _, chunk := range chunks {
// Trying to find Password prompt in first 5 chunks of data from server
if msgCount < 5 {
if !passwordSent && exPasswdPrompt.Match(chunk) {
ptmx.Write([]byte(currentPassword + "\n"))
passwordSent = true
shouldSkipEcho = true
continue
}
if shouldSkipEcho && exEcho.Match(chunk) {
shouldSkipEcho = false
continue
}
if passwordSent && exWrongPassword.Match(chunk) {
w.data <- &Message{[]byte("sudo: Authentication failure\n"), MTData, task.Hostname, -1}
taskForceStopped = true
break execLoop
}
}
if len(chunk) == 0 {
continue
}
if exConnectionClosed.Match(chunk) {
continue
}
if exLostConnection.Match(chunk) {
continue
}
// avoiding passing loop variable further as it's going to change its contents
data := make([]byte, len(chunk))
copy(data, chunk)
w.data <- &Message{data, MTData, task.Hostname, -1}
}
}
exitCode := 0
if taskForceStopped {
cmd.Process.Kill()
exitCode = ErrForceStop
log.Debugf("WRK[%d]: Task on %s was force stopped", w.id, task.Hostname)
}
err = cmd.Wait()
if !taskForceStopped {
if err != nil {
if exitErr, ok := err.(*exec.ExitError); ok {
ws := exitErr.Sys().(syscall.WaitStatus)
exitCode = ws.ExitStatus()
} else {
// MacOS hack
exitCode = ErrMacOsExit
}
}
log.Debugf("WRK[%d]: Task on %s exit code is %d", w.id, task.Hostname, exitCode)
}
return exitCode
}

286
remote/serial.go Normal file

@@ -0,0 +1,286 @@
package remote
import (
"fmt"
"os"
"os/exec"
"os/signal"
"syscall"
"time"
"github.com/kr/pty"
"github.com/npat-efault/poller"
"github.com/viert/xc/log"
"github.com/viert/xc/term"
"golang.org/x/crypto/ssh/terminal"
)
var (
passwordSent = false
shouldSkipEcho = false
)
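// forwardUserInput copies the user's stdin to the process pty until a read
// error occurs or *stopped is set.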
func forwardUserInput(in *poller.FD, out *os.File, stopped *bool) {
inBuf := make([]byte, bufferSize)
// processing stdin
for {
deadline := time.Now().Add(pollDeadline)
in.SetReadDeadline(deadline)
n, err := in.Read(inBuf)
if n > 0 {
// copy stdin to process ptmx
out.Write(inBuf[:n])
inBuf = make([]byte, bufferSize)
}
if err != nil {
if err != poller.ErrTimeout {
break
}
}
if *stopped {
break
}
}
}
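// interceptProcessOutput filters a chunk of remote output: connection-closed
// messages are dropped, password prompts are answered (and their echo skipped),
// and a non-nil error is returned on authentication failure.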
func interceptProcessOutput(in []byte, ptmx *os.File) (out []byte, err error) {
out = []byte{}
err = nil
if exConnectionClosed.Match(in) {
log.Debug("Connection closed message catched")
return
}
if exLostConnection.Match(in) {
log.Debug("Lost connection message catched")
return
}
if !passwordSent && exPasswdPrompt.Match(in) {
ptmx.Write([]byte(currentPassword + "\n"))
passwordSent = true
shouldSkipEcho = true
log.Debug("Password sent")
return
}
if shouldSkipEcho && exEcho.Match(in) {
log.Debug("Echo skipped")
shouldSkipEcho = false
return
}
if passwordSent && exWrongPassword.Match(in) {
log.Debug("Authentication error while raising privileges")
err = fmt.Errorf("auth_error")
return
}
out = in
return
}
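// runAtHost attaches the user's terminal to cmd running against host: stdin
// is switched to raw mode and forwarded to the pty, process output goes
// through interceptProcessOutput, and failures are recorded in r.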
func runAtHost(host string, cmd *exec.Cmd, r *ExecResult) {
var (
ptmx *os.File
si *poller.FD
buf []byte
err error
stopped = false
)
passwordSent = false
shouldSkipEcho = false
sigs := make(chan os.Signal, 1)
signal.Notify(sigs, syscall.SIGWINCH)
defer signal.Reset()
ptmx, err = pty.Start(cmd)
if err != nil {
term.Errorf("Error creating PTY: %s\n", err)
r.ErrorHosts = append(r.ErrorHosts, host)
r.Codes[host] = ErrTerminalError
return
}
pty.InheritSize(os.Stdin, ptmx)
defer ptmx.Close()
stdinBackup, err := syscall.Dup(int(os.Stdin.Fd()))
if err != nil {
term.Errorf("Error duplicating stdin descriptor: %s\n", err)
r.ErrorHosts = append(r.ErrorHosts, host)
r.Codes[host] = ErrTerminalError
return
}
stdinState, err := terminal.MakeRaw(int(os.Stdin.Fd()))
if err != nil {
term.Errorf("Error setting stdin to raw mode: %s\n", err)
r.ErrorHosts = append(r.ErrorHosts, host)
r.Codes[host] = ErrTerminalError
return
}
defer func() {
terminal.Restore(int(os.Stdin.Fd()), stdinState)
}()
si, err = poller.NewFD(int(os.Stdin.Fd()))
if err != nil {
term.Errorf("Error initializing poller: %s\n", err)
r.ErrorHosts = append(r.ErrorHosts, host)
r.Codes[host] = ErrTerminalError
return
}
defer func() {
log.Debug("Setting stdin back to blocking mode")
si.Close()
syscall.Dup2(stdinBackup, int(os.Stdin.Fd()))
syscall.SetNonblock(int(os.Stdin.Fd()), false)
}()
buf = make([]byte, bufferSize)
go forwardUserInput(si, ptmx, &stopped)
for {
n, err := ptmx.Read(buf)
if n > 0 {
// TODO random stuff with intercepting and omitting data
data, err := interceptProcessOutput(buf[:n], ptmx)
if err != nil {
// auth error, can't proceed
raise := "su"
if currentRaise == RTSudo {
raise = "sudo"
}
log.Debugf("Wrong %s password\n", raise)
term.Errorf("Wrong %s password\n", raise)
r.ErrorHosts = append(r.ErrorHosts, host)
r.Codes[host] = ErrAuthenticationError
break
}
if len(data) > 0 {
// copy stdin to process ptmx
_, err = os.Stdout.Write(data)
if err != nil {
count := stdoutWriteRetry
for os.IsTimeout(err) && count > 0 {
time.Sleep(time.Millisecond)
_, err = os.Stdout.Write(data)
count--
}
if err != nil {
log.Debugf("error writing to stdout not resolved in %d steps", stdoutWriteRetry)
}
}
}
}
if err != nil && err != poller.ErrTimeout {
stopped = true
break
}
select {
case <-sigs:
pty.InheritSize(os.Stdin, ptmx)
default:
continue
}
}
}
// RunSerial runs cmd on hosts in serial mode
func RunSerial(hosts []string, argv string, delay int) *ExecResult {
var (
err error
cmd *exec.Cmd
local string
remotePrefix string
remoteCmd string
sigs = make(chan os.Signal, 1)
)
r := newExecResult()
if argv != "" {
local, remotePrefix, err = prepareTempFiles(argv)
if err != nil {
term.Errorf("Error creating tempfile: %s\n", err)
return r
}
defer os.Remove(local)
}
execLoop:
for i, host := range hosts {
msg := term.HR(7) + " " + host + " " + term.HR(36-len(host))
fmt.Println(term.Blue(msg))
if argv != "" {
remoteCmd = fmt.Sprintf("%s.%s.sh", remotePrefix, host)
cmd = createSCPCmd(host, local, remoteCmd, false)
log.Debugf("Created SCP command: %v", cmd)
signal.Notify(sigs, syscall.SIGINT)
err = cmd.Run()
signal.Reset()
if err != nil {
term.Errorf("Error copying tempfile: %s\n", err)
r.ErrorHosts = append(r.ErrorHosts, host)
r.Codes[host] = ErrCopyFailed
continue
}
}
cmd = createSSHCmd(host, remoteCmd)
log.Debugf("Created SSH command: %v", cmd)
runAtHost(host, cmd, r)
exitCode := 0
err = cmd.Wait()
if err != nil {
if exitErr, ok := err.(*exec.ExitError); ok {
ws := exitErr.Sys().(syscall.WaitStatus)
exitCode = ws.ExitStatus()
} else {
// MacOS hack
exitCode = ErrMacOsExit
}
}
r.Codes[host] = exitCode
if exitCode != 0 {
r.ErrorHosts = append(r.ErrorHosts, host)
} else {
r.SuccessHosts = append(r.SuccessHosts, host)
}
// no delay after the last host
if delay > 0 && i != len(hosts)-1 {
log.Debugf("Delay %d secs", delay)
timer := time.After(time.Duration(delay) * time.Second)
signal.Notify(sigs, syscall.SIGINT)
timeLoop:
for {
select {
case <-sigs:
log.Debugf("Delay interrupted by ^C")
signal.Reset()
break execLoop
case <-timer:
log.Debugf("Delay finished")
signal.Reset()
break timeLoop
default:
continue
}
}
}
}
return r
}

68
remote/ssh.go Normal file

@@ -0,0 +1,68 @@
package remote
import (
"fmt"
"os/exec"
"strings"
"github.com/viert/xc/log"
)
var (
sshOptions = map[string]string{
"PasswordAuthentication": "no",
"PubkeyAuthentication": "yes",
"StrictHostKeyChecking": "no",
"TCPKeepAlive": "yes",
"ServerAliveCountMax": "12",
"ServerAliveInterval": "5",
}
)
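// sshOpts renders sshOptions into a flat list of "-o key=value" command line arguments.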
func sshOpts() (params []string) {
params = make([]string, 0)
for opt, value := range sshOptions {
option := fmt.Sprintf("%s=%s", opt, value)
params = append(params, "-o", option)
}
return
}
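// createSCPCmd builds an scp command copying local to currentUser@host:remote, optionally recursively.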
func createSCPCmd(host string, local string, remote string, recursive bool) *exec.Cmd {
params := []string{}
if recursive {
params = []string{"-r"}
}
params = append(params, sshOpts()...)
remoteExpr := fmt.Sprintf("%s@%s:%s", currentUser, host, remote)
params = append(params, local, remoteExpr)
log.Debugf("Created command scp %v", params)
return exec.Command("scp", params...)
}
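// createSSHCmd builds an ssh command that logs in to host as currentUser,
// starts the interpreter matching the current raise type and passes argv
// to it via -c when non-empty.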
func createSSHCmd(host string, argv string) *exec.Cmd {
params := []string{
"-tt",
"-l",
currentUser,
}
params = append(params, sshOpts()...)
params = append(params, host)
params = append(params, getInterpreter()...)
if argv != "" {
params = append(params, "-c", argv)
}
log.Debugf("Created command ssh %v", params)
return exec.Command("ssh", params...)
}
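// getInterpreter returns the interpreter command line for the current raise type, split into arguments.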
func getInterpreter() []string {
switch currentRaise {
case RTSudo:
return strings.Split(sudoInterpreter, " ")
case RTSu:
return strings.Split(suInterpreter, " ")
default:
return strings.Split(noneInterpreter, " ")
}
}

174
remote/worker.go Normal file

@@ -0,0 +1,174 @@
package remote
import (
"regexp"
"sync"
"time"
"github.com/viert/xc/log"
)
// RaiseType enum
type RaiseType int
// Raise types
const (
RTNone RaiseType = iota
RTSu
RTSudo
)
// Task type represents a worker task descriptor
type Task struct {
Hostname string
LocalFilename string
RemoteFilename string
RecursiveCopy bool
Cmd string
WG *sync.WaitGroup
}
// MessageType describes a type of worker message
type MessageType int
// Message represents a worker message
type Message struct {
Data []byte
Type MessageType
Hostname string
StatusCode int
}
// Enum of OutputTypes
const (
MTData MessageType = iota
MTDebug
MTCopyFinished
MTExecFinished
)
// Custom error codes
const (
ErrMacOsExit = 32500 + iota
ErrForceStop
ErrCopyFailed
ErrTerminalError
ErrAuthenticationError
)
const (
pollDeadline = 50 * time.Millisecond
bufferSize = 4096
)
// Worker type represents a worker object
type Worker struct {
id int
queue chan *Task
data chan *Message
stop chan bool
busy bool
}
var (
wrkseq = 1
environment = []string{"LC_ALL=en_US.UTF-8", "LANG=en_US.UTF-8"}
// remote expressions to catch
exConnectionClosed = regexp.MustCompile(`([Ss]hared\s+)?[Cc]onnection\s+to\s+.+\s+closed\.?[\n\r]+`)
exPasswdPrompt = regexp.MustCompile(`[Pp]assword`)
exWrongPassword = regexp.MustCompile(`[Ss]orry.+try.+again\.?`)
exPermissionDenied = regexp.MustCompile(`[Pp]ermission\s+denied`)
exLostConnection = regexp.MustCompile(`[Ll]ost\sconnection`)
exEcho = regexp.MustCompile(`^[\n\r]+$`)
)
// NewWorker creates a new worker
func NewWorker(queue chan *Task, data chan *Message) *Worker {
w := &Worker{
id: wrkseq,
queue: queue,
data: data,
stop: make(chan bool, 1),
busy: false,
}
wrkseq++
go w.run()
return w
}
// ID is a worker id getter
func (w *Worker) ID() int {
return w.id
}
func (w *Worker) run() {
var result int
log.Debugf("WRK[%d] Started", w.id)
for task := range w.queue {
// Every task consists of copying part and executing part
// It may contain both or just one of them
// If there are both parts, worker copies data and then runs
// the given command immediately. This behaviour is handy for runscript
// command when the script is being copied to a remote server
// and called right after it.
w.busy = true
log.Debugf("WRK[%d] Got a task for host %s by worker", w.id, task.Hostname)
// does the task have anything to copy?
if task.RemoteFilename != "" && task.LocalFilename != "" {
result = w.copy(task)
log.Debugf("WRK[%d] Copy on %s, status=%d", w.id, task.Hostname, result)
w.data <- &Message{nil, MTCopyFinished, task.Hostname, result}
if result != 0 {
log.Debugf("WRK[%d] Copy on %s, result != 0, catching", w.id, task.Hostname)
// if copying failed we can't proceed further with the task if there's anything to run
if task.Cmd != "" {
log.Debugf("WRK[%d] Copy on %s, result != 0, task.Cmd == \"%s\", sending ExecFinished", w.id, task.Hostname, task.Cmd)
w.data <- &Message{nil, MTExecFinished, task.Hostname, ErrCopyFailed}
}
w.busy = false
if task.WG != nil {
task.WG.Done()
}
// next task
continue
}
}
// does the task have anything to run?
if task.Cmd != "" {
log.Debugf("WRK[%d] runcmd(%s) at %s", task.Cmd, task.Hostname)
result = w.runcmd(task)
w.data <- &Message{nil, MTExecFinished, task.Hostname, result}
}
if task.WG != nil {
task.WG.Done()
}
w.busy = false
}
log.Debugf("WRK[%d] Task queue has closed, worker is exiting", w.id)
}
// ForceStop stops the current task execution and returns true
// if any task were actually executed at the moment of calling ForceStop
func (w *Worker) ForceStop() bool {
if w.busy {
w.stop <- true
return true
}
return false
}
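// forceStopped reports whether a force stop has been requested, without blocking.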
func (w *Worker) forceStopped() bool {
select {
case <-w.stop:
return true
default:
return false
}
}

12
store/backend.go Normal file

@@ -0,0 +1,12 @@
package store
// Backend represents a store backend interface
type Backend interface {
Load() error
Reload() error
Datacenters() []*Datacenter
Groups() []*Group
WorkGroups() []*WorkGroup
Hosts() []*Host
}
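// A minimal sketch of a custom backend implementation (hypothetical, for
// illustration only; staticBackend is not part of this codebase):
//
//	type staticBackend struct{ hosts []*Host }
//
//	func (b *staticBackend) Load() error                 { return nil }
//	func (b *staticBackend) Reload() error               { return nil }
//	func (b *staticBackend) Datacenters() []*Datacenter  { return nil }
//	func (b *staticBackend) Groups() []*Group            { return nil }
//	func (b *staticBackend) WorkGroups() []*WorkGroup    { return nil }
//	func (b *staticBackend) Hosts() []*Host              { return b.hosts }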

310
store/parser.go Normal file

@@ -0,0 +1,310 @@
package store
import (
"fmt"
"regexp"
"strings"
"github.com/viert/xc/stringslice"
)
type tokenType int
type parserstate int
const (
tTypeHost tokenType = iota
tTypeGroup
tTypeWorkGroup
tTypeHostRegexp
)
const (
stateWait parserstate = iota
stateReadHost
stateReadGroup
stateReadWorkGroup
stateReadDatacenter
stateReadTag
stateReadHostBracePattern
stateReadRegexp
)
type token struct {
Type tokenType
Value string
DatacenterFilter string
TagsFilter []string
RegexpFilter *regexp.Regexp
Exclude bool
}
var (
hostSymbols = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.-{}"
)
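// newToken allocates a token with empty tag and regexp filters.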
func newToken() *token {
ct := new(token)
ct.TagsFilter = make([]string, 0)
ct.RegexpFilter = nil
return ct
}
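// maybeAddHost appends host to hostlist, or removes it from the list when exclude is true.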
func maybeAddHost(hostlist *[]string, host string, exclude bool) {
newHl := *hostlist
if exclude {
hIdx := stringslice.Index(newHl, host)
if hIdx >= 0 {
newHl = append(newHl[:hIdx], newHl[hIdx+1:]...)
}
} else {
newHl = append(newHl, host)
}
*hostlist = newHl
}
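// parseExpression is a hand-written state machine that turns a host
// expression like "%group1@dc1#tag1,-host2" into a list of tokens.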
func parseExpression(expr []rune) ([]*token, error) {
ct := newToken()
res := make([]*token, 0)
state := stateWait
tag := ""
re := ""
last := false
for i := 0; i < len(expr); i++ {
sym := expr[i]
last = i == len(expr)-1
switch state {
case stateWait:
if sym == '-' {
ct.Exclude = true
continue
}
if sym == '*' {
state = stateReadWorkGroup
ct.Type = tTypeWorkGroup
continue
}
if sym == '%' {
state = stateReadGroup
ct.Type = tTypeGroup
continue
}
if sym == '#' {
ct.Type = tTypeWorkGroup
state = stateReadTag
tag = ""
continue
}
if sym == '/' || sym == '~' {
state = stateReadHost
ct.Type = tTypeHostRegexp
continue
}
if strings.ContainsRune(hostSymbols, sym) {
state = stateReadHost
ct.Type = tTypeHost
ct.Value += string(sym)
continue
}
return nil, fmt.Errorf("Invalid symbol %s, expected -, *, %% or a hostname at position %d", string(sym), i)
case stateReadGroup:
if sym == '@' {
state = stateReadDatacenter
continue
}
if sym == '#' {
state = stateReadTag
tag = ""
continue
}
if sym == '/' {
state = stateReadRegexp
re = ""
continue
}
if sym == ',' || last {
if last && sym != ',' {
ct.Value += string(sym)
}
if ct.Value == "" {
return nil, fmt.Errorf("Empty group name at position %d", i)
}
res = append(res, ct)
ct = newToken()
state = stateWait
continue
}
ct.Value += string(sym)
case stateReadWorkGroup:
if sym == '@' {
state = stateReadDatacenter
continue
}
if sym == '#' {
tag = ""
state = stateReadTag
continue
}
if sym == '/' {
state = stateReadRegexp
re = ""
continue
}
if sym == ',' || last {
if last && sym != ',' {
ct.Value += string(sym)
}
res = append(res, ct)
ct = newToken()
state = stateWait
continue
}
ct.Value += string(sym)
case stateReadRegexp:
if sym == '\\' && !last && expr[i+1] == '/' {
// screened slash
re += "/"
i++
continue
}
if sym == '/' {
compiled, err := regexp.Compile(re)
if err != nil {
return nil, fmt.Errorf("error compiling regexp at %d: %s", i, err)
}
ct.RegexpFilter = compiled
res = append(res, ct)
ct = newToken()
state = stateWait
// regexp should stop with '/EOL' or with '/,'
// however stateWait doesn't expect a comma, so
// we skip it:
if !last && expr[i+1] == ',' {
i++
}
continue
}
re += string(sym)
case stateReadHost:
if sym == '/' {
state = stateReadRegexp
re = ""
continue
}
if sym == '{' {
state = stateReadHostBracePattern
}
if sym == ',' || last {
if last && sym != ',' {
ct.Value += string(sym)
}
res = append(res, ct)
ct = newToken()
state = stateWait
continue
}
ct.Value += string(sym)
case stateReadHostBracePattern:
if sym == '{' {
return nil, fmt.Errorf("nested patterns are not allowed (at %d)", i)
}
if sym == '}' {
state = stateReadHost
}
ct.Value += string(sym)
case stateReadDatacenter:
if sym == ',' || last {
if last && sym != ',' {
ct.DatacenterFilter += string(sym)
}
res = append(res, ct)
ct = newToken()
state = stateWait
continue
}
if sym == '#' {
tag = ""
state = stateReadTag
continue
}
if sym == '/' {
re = ""
state = stateReadRegexp
continue
}
ct.DatacenterFilter += string(sym)
case stateReadTag:
if sym == ',' || last {
if last && sym != ',' {
tag += string(sym)
}
if tag == "" {
return nil, fmt.Errorf("empty tag at position %d", i)
}
ct.TagsFilter = append(ct.TagsFilter, tag)
res = append(res, ct)
ct = newToken()
state = stateWait
continue
}
if sym == '#' {
if tag == "" {
return nil, fmt.Errorf("Empty tag at position %d", i)
}
ct.TagsFilter = append(ct.TagsFilter, tag)
tag = ""
continue
}
tag += string(sym)
}
}
if ct.Value != "" || state == stateReadWorkGroup {
// workgroup token can be empty
res = append(res, ct)
} else {
if state != stateWait {
return nil, fmt.Errorf("unexpected end of expression")
}
}
if state == stateReadDatacenter || state == stateReadTag || state == stateReadHostBracePattern || state == stateReadRegexp {
return nil, fmt.Errorf("unexpected end of expression")
}
return res, nil
}

72
store/schema.go Normal file

@@ -0,0 +1,72 @@
package store
// Datacenter represents datacenter object
type Datacenter struct {
ID string
Description string
Name string
ParentID string
Parent *Datacenter
Root *Datacenter
Children []*Datacenter
}
// Host represents host object
type Host struct {
ID string
Aliases []string
Tags []string
FQDN string
GroupID string
DatacenterID string
Description string
AllTags []string
Datacenter *Datacenter
Group *Group
}
// Group represents a group of hosts
type Group struct {
ID string
ParentID string
Tags []string
Description string
Name string
WorkGroupID string
AllTags []string
WorkGroup *WorkGroup
Children []*Group
Parent *Group
Hosts []*Host
}
// WorkGroup represents a group of users
type WorkGroup struct {
ID string
Name string
Description string
Groups []*Group
}
type dcstore struct {
_id map[string]*Datacenter
name map[string]*Datacenter
}
type groupstore struct {
_id map[string]*Group
name map[string]*Group
}
type hoststore struct {
_id map[string]*Host
fqdn map[string]*Host
}
type wgstore struct {
_id map[string]*WorkGroup
name map[string]*WorkGroup
}

405
store/store.go Normal file

@@ -0,0 +1,405 @@
package store
import (
"regexp"
"sort"
"strings"
"github.com/viert/sekwence"
"github.com/viert/xc/stringslice"
)
// Store represents host tree data store
type Store struct {
datacenters *dcstore
groups *groupstore
hosts *hoststore
workgroups *wgstore
tags []string
backend Backend
}
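// newStore allocates an empty Store with all lookup indexes initialized.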
func newStore() *Store {
s := new(Store)
s.datacenters = new(dcstore)
s.datacenters._id = make(map[string]*Datacenter)
s.datacenters.name = make(map[string]*Datacenter)
s.groups = new(groupstore)
s.groups._id = make(map[string]*Group)
s.groups.name = make(map[string]*Group)
s.hosts = new(hoststore)
s.hosts._id = make(map[string]*Host)
s.hosts.fqdn = make(map[string]*Host)
s.workgroups = new(wgstore)
s.workgroups._id = make(map[string]*WorkGroup)
s.workgroups.name = make(map[string]*WorkGroup)
s.tags = make([]string, 0)
return s
}
func (s *Store) addHost(host *Host) {
s.hosts.fqdn[host.FQDN] = host
s.hosts._id[host.ID] = host
}
func (s *Store) addGroup(group *Group) {
s.groups.name[group.Name] = group
s.groups._id[group.ID] = group
}
func (s *Store) addDatacenter(dc *Datacenter) {
s.datacenters.name[dc.Name] = dc
s.datacenters._id[dc.ID] = dc
}
func (s *Store) addWorkGroup(wg *WorkGroup) {
s.workgroups.name[wg.Name] = wg
s.workgroups._id[wg.ID] = wg
}
// CompleteTag returns all postfixes of tags starting with a given prefix
func (s *Store) CompleteTag(prefix string) []string {
res := make([]string, 0)
for _, tag := range s.tags {
if prefix == "" || strings.HasPrefix(tag, prefix) {
res = append(res, tag[len(prefix):])
}
}
sort.Strings(res)
return res
}
// CompleteHost returns all postfixes of host fqdns starting with a given prefix
func (s *Store) CompleteHost(prefix string) []string {
res := make([]string, 0)
for hostname := range s.hosts.fqdn {
if prefix == "" || strings.HasPrefix(hostname, prefix) {
res = append(res, hostname[len(prefix):])
}
}
sort.Strings(res)
return res
}
// CompleteGroup returns all postfixes of group names starting with a given prefix
func (s *Store) CompleteGroup(prefix string) []string {
res := make([]string, 0)
for name := range s.groups.name {
if prefix == "" || strings.HasPrefix(name, prefix) {
res = append(res, name[len(prefix):])
}
}
sort.Strings(res)
return res
}
// CompleteDatacenter returns all postfixes of dc names starting with a given prefix
func (s *Store) CompleteDatacenter(prefix string) []string {
res := make([]string, 0)
for name := range s.datacenters.name {
if prefix == "" || strings.HasPrefix(name, prefix) {
res = append(res, name[len(prefix):])
}
}
sort.Strings(res)
return res
}
// CompleteWorkGroup returns all postfixes of workgroup names starting with a given prefix
func (s *Store) CompleteWorkGroup(prefix string) []string {
res := make([]string, 0)
for name := range s.workgroups.name {
if prefix == "" || strings.HasPrefix(name, prefix) {
res = append(res, name[len(prefix):])
}
}
sort.Strings(res)
return res
}
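// matchHost returns a sorted list of host FQDNs matching the given regexp.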
func (s *Store) matchHost(pattern *regexp.Regexp) []string {
res := make([]string, 0)
for hostname := range s.hosts.fqdn {
if pattern.MatchString(hostname) {
res = append(res, hostname)
}
}
sort.Strings(res)
return res
}
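// groupAllChildren recursively collects all descendant groups of g.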
func (s *Store) groupAllChildren(g *Group) []*Group {
children := make([]*Group, len(g.Children))
copy(children, g.Children)
for _, child := range g.Children {
children = append(children, s.groupAllChildren(child)...)
}
return children
}
func (s *Store) groupAllHosts(g *Group) []*Host {
allGroups := s.groupAllChildren(g)
allGroups = append(allGroups, g)
hosts := make([]*Host, 0)
for _, group := range allGroups {
hosts = append(hosts, group.Hosts...)
}
return hosts
}
// HostList returns a list of host FQDNs according to a given
// expression
func (s *Store) HostList(expr []rune) ([]string, error) {
tokens, err := parseExpression(expr)
if err != nil {
return nil, err
}
hostlist := make([]string, 0)
for _, token := range tokens {
switch token.Type {
case tTypeHostRegexp:
for _, host := range s.matchHost(token.RegexpFilter) {
maybeAddHost(&hostlist, host, token.Exclude)
}
case tTypeHost:
hosts, err := sekwence.ExpandPattern(token.Value)
if err != nil {
hosts = []string{token.Value}
}
hostLoop0:
for _, host := range hosts {
if len(token.TagsFilter) > 0 {
invhost, found := s.hosts.fqdn[host]
if !found {
continue
}
for _, tag := range token.TagsFilter {
if !stringslice.Contains(invhost.Tags, tag) {
continue hostLoop0
}
}
}
maybeAddHost(&hostlist, host, token.Exclude)
}
case tTypeGroup:
if group, found := s.groups.name[token.Value]; found {
hosts := s.groupAllHosts(group)
hostLoop1:
for _, host := range hosts {
if token.DatacenterFilter != "" {
if host.Datacenter == nil {
continue
}
if host.Datacenter.Name != token.DatacenterFilter {
// TODO tree
continue
}
}
for _, tag := range token.TagsFilter {
if !stringslice.Contains(host.Tags, tag) {
continue hostLoop1
}
}
if token.RegexpFilter != nil {
if !token.RegexpFilter.MatchString(host.FQDN) {
continue
}
}
maybeAddHost(&hostlist, host.FQDN, token.Exclude)
}
}
case tTypeWorkGroup:
workgroups := make([]*WorkGroup, 0)
if token.Value == "" {
for _, wg := range s.workgroups.name {
workgroups = append(workgroups, wg)
}
} else {
wg, found := s.workgroups.name[token.Value]
if found {
workgroups = []*WorkGroup{wg}
}
}
if len(workgroups) > 0 {
hosts := make([]*Host, 0)
for _, wg := range workgroups {
groups := wg.Groups
for _, group := range groups {
hosts = append(hosts, group.Hosts...)
}
}
hostLoop2:
for _, host := range hosts {
if token.DatacenterFilter != "" {
if host.Datacenter == nil {
continue
}
if host.Datacenter.Name != token.DatacenterFilter {
// TODO tree
continue
}
}
for _, tag := range token.TagsFilter {
if !stringslice.Contains(host.Tags, tag) {
continue hostLoop2
}
}
if token.RegexpFilter != nil {
if !token.RegexpFilter.MatchString(host.FQDN) {
continue
}
}
maybeAddHost(&hostlist, host.FQDN, token.Exclude)
}
}
}
}
// TODO: fix force sorting: sort inside the group/workgroup processing only
sort.Strings(hostlist)
return hostlist, nil
}
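
HostList is the single entry point the DSL goes through: parseExpression tokenizes the expression, and the switch above resolves each token kind against the store. A minimal usage sketch, assuming the store has already been populated from a backend and that the names used here exist in it:

```
hosts, err := s.HostList([]rune("%group1,-host2.example.com"))
if err != nil {
    // a malformed expression fails in parseExpression, before any lookup happens
}
// hosts now holds every FQDN of group1's subtree except host2.example.com,
// sorted alphabetically
```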
// apply is called after the raw data is loaded and creates relations
// between models according to relation ids
func (s *Store) apply() {
var host *Host
var group *Group
var parent *Group
var workgroup *WorkGroup
var datacenter *Datacenter
tagmap := make(map[string]bool)
for _, dc := range s.datacenters._id {
if dc.ParentID != "" {
dc.Parent = s.datacenters._id[dc.ParentID]
}
}
for _, dc := range s.datacenters._id {
if dc.Parent != nil {
datacenter = dc.Parent
for datacenter.Parent != nil {
datacenter = datacenter.Parent
}
dc.Root = datacenter
}
}
for _, group = range s.groups._id {
if group.ParentID != "" {
parent = s.groups._id[group.ParentID]
if parent != nil {
group.Parent = parent
parent.Children = append(parent.Children, group)
}
}
if group.WorkGroupID != "" {
workgroup = s.workgroups._id[group.WorkGroupID]
if workgroup != nil {
group.WorkGroup = workgroup
workgroup.Groups = append(workgroup.Groups, group)
}
}
}
// calculate AllTags for groups and collect all the tags into a set
for _, group = range s.groups._id {
// collecting tags in one set
for _, tag := range group.Tags {
tagmap[tag] = true
}
group.AllTags = make([]string, 0)
parent = group
for parent != nil {
group.AllTags = append(group.AllTags, parent.Tags...)
parent = parent.Parent
}
sort.Strings(group.AllTags)
}
for _, host = range s.hosts._id {
if host.GroupID != "" {
group = s.groups._id[host.GroupID]
if group != nil {
host.Group = group
group.Hosts = append(group.Hosts, host)
}
}
if host.DatacenterID != "" {
host.Datacenter = s.datacenters._id[host.DatacenterID]
}
}
// calculate AllTags for hosts
for _, host = range s.hosts._id {
// collecting tags in one set
for _, tag := range host.Tags {
tagmap[tag] = true
}
host.AllTags = make([]string, len(host.Tags))
copy(host.AllTags, host.Tags)
parent = host.Group
for parent != nil {
host.AllTags = append(host.AllTags, parent.Tags...)
parent = parent.Parent
}
}
for tag := range tagmap {
s.tags = append(s.tags, tag)
}
sort.Strings(s.tags)
}
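
apply works in two passes: the first walks the raw *ID fields and wires actual pointers (Parent, Root, WorkGroup, Group, Datacenter), the second derives the computed data, i.e. AllTags and the global tag list. One asymmetry is worth spelling out: group AllTags are sorted here, host AllTags are not (the test below sorts them before comparing). A hypothetical trace:

```
// chain: g1 (tag1) <- g1.1 (tag2) <- host (tag3)
// after apply():
//   g1.1.AllTags == []string{"tag1", "tag2"}         // own + ancestor tags, sorted
//   host.AllTags == []string{"tag3", "tag2", "tag1"} // own tags first, then up the
//                                                    // group chain; left unsorted
//   s.tags       == []string{"tag1", "tag2", "tag3"} // global tag set, sorted
```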
// CreateStore creates a new store and loads data from a given backend
func CreateStore(backend Backend) (*Store, error) {
s := newStore()
s.backend = backend
err := backend.Load()
if err == nil {
for _, host := range backend.Hosts() {
s.addHost(host)
}
for _, group := range backend.Groups() {
s.addGroup(group)
}
for _, datacenter := range backend.Datacenters() {
s.addDatacenter(datacenter)
}
for _, workgroup := range backend.WorkGroups() {
s.addWorkGroup(workgroup)
}
s.apply()
}
return s, err
}
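
CreateStore owns the loading step, so callers should hand it an unloaded backend: it calls backend.Load() itself and copies data into the store only when loading succeeds. A minimal sketch using the FakeBackend defined in store_test.go below:

```
fb := newFB()
s, err := CreateStore(fb) // CreateStore invokes fb.Load() itself
if err != nil {
    // nothing was copied into the store; s is empty but non-nil
}
hosts, _ := s.HostList([]rune("*workgroup"))
```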
// BackendLoad proxies to the backend's Load method
func (s *Store) BackendLoad() error {
return s.backend.Load()
}
// BackendReload proxies to the backend's Reload method
func (s *Store) BackendReload() error {
return s.backend.Reload()
}

store/store_test.go Normal file

@@ -0,0 +1,179 @@
package store
import (
"sort"
"testing"
)
type FakeBackend struct {
hosts []*Host
groups []*Group
workgroups []*WorkGroup
datacenters []*Datacenter
}
func (fb *FakeBackend) Hosts() []*Host {
return fb.hosts
}
func (fb *FakeBackend) Groups() []*Group {
return fb.groups
}
func (fb *FakeBackend) Datacenters() []*Datacenter {
return fb.datacenters
}
func (fb *FakeBackend) WorkGroups() []*WorkGroup {
return fb.workgroups
}
func (fb *FakeBackend) Load() error {
wg := &WorkGroup{
ID: "wg1",
Name: "workgroup",
Groups: make([]*Group, 0),
}
fb.workgroups = append(fb.workgroups, wg)
group1 := &Group{
ID: "g1",
Name: "group1",
WorkGroupID: "wg1",
ParentID: "",
Tags: []string{"tag1", "tag2"},
}
group2 := &Group{
ID: "g2",
Name: "group2",
WorkGroupID: "wg1",
ParentID: "g1",
Tags: []string{"tag3", "tag4"},
}
fb.groups = append(fb.groups, group1, group2)
dc1 := &Datacenter{
ID: "dc1",
Name: "datacenter1",
ParentID: "",
}
dc2 := &Datacenter{
ID: "dc2",
Name: "datacenter1.1",
ParentID: "dc1",
}
fb.datacenters = append(fb.datacenters, dc1, dc2)
host := &Host{
ID: "h1",
FQDN: "host1.example.com",
Aliases: []string{"host1", "host1.i"},
Tags: []string{"tag5"},
GroupID: "g2",
DatacenterID: "dc2",
}
fb.hosts = append(fb.hosts, host)
return nil
}
func (fb *FakeBackend) Reload() error {
return fb.Load()
}
func newFB() *FakeBackend {
fb := new(FakeBackend)
fb.hosts = make([]*Host, 0)
fb.groups = make([]*Group, 0)
fb.datacenters = make([]*Datacenter, 0)
fb.workgroups = make([]*WorkGroup, 0)
return fb
}
func TestStoreRelations(t *testing.T) {
var found bool
fb := newFB()
// no explicit fb.Load() here: CreateStore calls backend.Load() itself,
// and loading twice would append every fixture object to the backend again
s, err := CreateStore(fb)
if err != nil {
t.Error(err)
return
}
wg, found := s.workgroups._id["wg1"]
if !found {
t.Error("Workgroup wg1 not found")
}
g1, found := s.groups._id["g1"]
if !found {
t.Error("Group g1 not found")
}
g2, found := s.groups._id["g2"]
if !found {
t.Error("Group g2 not found")
}
h1, found := s.hosts._id["h1"]
if !found {
t.Error("Host h1 not found")
}
if g1.WorkGroup != wg {
t.Error("Group g1 should be connected to workgroup wg1")
}
if g2.WorkGroup != wg {
t.Error("Group g2 should be connected to workgroup wg1")
}
if g2.Parent != g1 {
t.Error("Group g2 parent must be g1")
}
found = false
for _, chg := range g1.Children {
if chg == g2 {
found = true
break
}
}
if !found {
t.Error("g2 should exist in g1.Children")
}
var tagsReal []string
var tagsExpected []string
tagsReal = g2.AllTags
tagsExpected = []string{"tag1", "tag2", "tag3", "tag4"}
sort.Strings(tagsReal)
if len(tagsReal) != len(tagsExpected) {
t.Errorf("Group2 AllTags expected to be of length %d, however its length is %d", len(tagsExpected), len(tagsReal))
return
}
for i := 0; i < len(tagsExpected); i++ {
if tagsReal[i] != tagsExpected[i] {
t.Errorf("Expected tag %s at position %d, found %s", tagsExpected[i], i, tagsReal[i])
}
}
tagsReal = h1.AllTags
tagsExpected = []string{"tag1", "tag2", "tag3", "tag4", "tag5"}
sort.Strings(tagsReal)
if len(tagsReal) != len(tagsExpected) {
t.Errorf("Host1 AllTags expected to be of length %d, however its length is %d", len(tagsExpected), len(tagsReal))
return
}
for i := 0; i < len(tagsExpected); i++ {
if tagsReal[i] != tagsExpected[i] {
t.Errorf("Expected tag %s at position %d, found %s", tagsExpected[i], i, tagsReal[i])
}
}
}


@@ -0,0 +1,16 @@
package stringslice
// Index returns the index of item in the given slice, or -1 if it is absent
func Index(arr []string, item string) int {
for i := 0; i < len(arr); i++ {
if item == arr[i] {
return i
}
}
return -1
}
// Contains reports whether the given slice contains the item
func Contains(arr []string, item string) bool {
return Index(arr, item) >= 0
}
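
Contains is the predicate the tag filters in HostList lean on; a linear scan is perfectly adequate for the short tag lists involved. A tiny hypothetical example:

```
tags := []string{"tag1", "tag2"}
stringslice.Index(tags, "tag2")    // 1
stringslice.Index(tags, "tag3")    // -1
stringslice.Contains(tags, "tag1") // true
```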

term/colors.go Normal file

@@ -0,0 +1,79 @@
package term
import (
"fmt"
)
type colorValue int
// Color codes
const (
CBlack colorValue = 30
CRed colorValue = 31
CGreen colorValue = 32
CYellow colorValue = 33
CBlue colorValue = 34
CMagenta colorValue = 35
CCyan colorValue = 36
CLightGray colorValue = 37
CDarkGray colorValue = 90
CLightRed colorValue = 91
CLightGreen colorValue = 92
CLightYellow colorValue = 93
CLightBlue colorValue = 94
CLightMagenta colorValue = 95
CLightCyan colorValue = 96
CWhite colorValue = 97
)
// Colored wraps the message in ANSI escape sequences to produce colored output
func Colored(message string, c colorValue, bold bool) string {
bstr := ""
if bold {
bstr = ";1"
}
return fmt.Sprintf("\033[%d%sm%s\033[0m", c, bstr, message)
}
// Blue returns message colored with light blue color
func Blue(message string) string {
return Colored(message, CLightBlue, false)
}
// Red returns message colored with light red color
func Red(message string) string {
return Colored(message, CLightRed, false)
}
// Green returns message colored with light green color
func Green(message string) string {
return Colored(message, CLightGreen, false)
}
// Yellow returns message colored with light yellow color
func Yellow(message string) string {
return Colored(message, CLightYellow, false)
}
// Cyan returns message colored with light cyan color
func Cyan(message string) string {
return Colored(message, CLightCyan, false)
}
// Errorf prints a red-colored formatted error message
func Errorf(format string, args ...interface{}) {
message := fmt.Sprintf(format, args...)
fmt.Print(Red(message))
}
// Successf prints a green-colored formatted message
func Successf(format string, args ...interface{}) {
message := fmt.Sprintf(format, args...)
fmt.Print(Green(message))
}
// Warnf prints a yellow-colored formatted warning message
func Warnf(format string, args ...interface{}) {
message := fmt.Sprintf(format, args...)
fmt.Print(Yellow(message))
}
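
Colored emits standard ANSI SGR sequences: `ESC[<code>m` switches a color on, an optional `;1` adds bold, and `ESC[0m` resets. A small sketch of the bytes produced and of the printf-style helpers on top:

```
term.Colored("ok", term.CGreen, false) // "\033[32mok\033[0m"
term.Colored("ok", term.CGreen, true)  // "\033[32;1mok\033[0m"
term.Warnf("%d hosts skipped\n", 3)    // light yellow (code 93), written to stdout
```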

term/util.go Normal file

@@ -0,0 +1,34 @@
package term
import (
"bytes"
"syscall"
"unsafe"
)
type winsize struct {
Row uint16
Col uint16
Xpixel uint16
Ypixel uint16
}
// HR returns a horizontal ruler of a given size
func HR(n int) string {
h := bytes.Repeat([]byte("="), n)
return string(h)
}
// GetTerminalWidth returns the current terminal width in columns
func GetTerminalWidth() int {
ws := &winsize{}
retCode, _, errno := syscall.Syscall(syscall.SYS_IOCTL,
uintptr(syscall.Stdin),
uintptr(syscall.TIOCGWINSZ),
uintptr(unsafe.Pointer(ws)))
if int(retCode) == -1 {
panic(errno)
}
return int(ws.Col)
}
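
GetTerminalWidth issues the TIOCGWINSZ ioctl against stdin, so it panics when stdin is not a terminal (for example, when input is piped in); callers are expected to run interactively. Together with HR it draws a ruler across the whole terminal:

```
// a minimal sketch; panics if stdin is not a tty
fmt.Println(term.HR(term.GetTerminalWidth()))
```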