inventory and ci setup

Sergey Bondarev
2018-07-26 22:23:45 +03:00
parent dde5d763b2
commit 9a24cda68c
9 changed files with 563 additions and 0 deletions


@@ -0,0 +1,14 @@
#!/bin/sh
# Load the SSH key into an agent beforehand if key-based auth is preferred:
###ssh-agent bash
#ssh-add ~/.ssh/id_rsa
if [ -z "$1" ]; then
    echo "Usage: $0 adminname"
    exit 1
fi
d=$(date '+%Y.%m.%d_%H:%M')
# Run the playbook with forced colors and keep a timestamped log of the run
ANSIBLE_FORCE_COLOR=true ansible-playbook -u "$1" -k -i inventory/hosts site.yml -b --diff 2>&1 | tee "./deploy-$d.log"
# Strip ANSI escape sequences from the saved log
sed -i -r "s/\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[mK]//g" "./deploy-$d.log"
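
A typical run, assuming the wrapper above is saved as deploy.sh next to the inventory/ directory and the remote admin account is named admin (both names are assumptions, not part of this commit):

    ./deploy.sh admin

The script prompts for the SSH password (-k), runs site.yml against inventory/hosts with privilege escalation and --diff, and leaves a color-stripped copy of the output in deploy-<timestamp>.log.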


@@ -0,0 +1,19 @@
ceph_origin: repository
ceph_repository: community
ceph_repository_type: cdn
ceph_stable_release: luminous
public_network: "172.20.100.0/24"
cluster_network: "172.20.100.0/24"
osd_objectstore: bluestore
osd_scenario: collocated
devices:
  - /dev/sdb
osd_pool_default_pg_num: 30
ceph_conf_overrides:
  global:
    "osd journal size": 1024
    "osd pool default size": 3
    "osd pool default min size": 2


@@ -0,0 +1,20 @@
[mons]
ingress-1.slurm.io monitor_address=172.20.100.5
node-1.slurm.io monitor_address=172.20.100.6
node-2.slurm.io monitor_address=172.20.100.7
[osds]
ingress-1.slurm.io
node-1.slurm.io
node-2.slurm.io
[mgrs]
ingress-1.slurm.io
node-1.slurm.io
node-2.slurm.io
[mdss]
ingress-1.slurm.io
node-1.slurm.io
node-2.slurm.io
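
Assuming this is the inventory/hosts file referenced by the deploy wrapper above, connectivity to every group can be checked before the first run with an ad-hoc ping (the admin user name is an assumption):

    ansible -i inventory/hosts -u admin -k -m ping all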

practice/ci-cd/setup.sh Executable file

@@ -0,0 +1,86 @@
#!/bin/bash
# Create a per-project namespace, service account, RBAC role/binding and Tiller
# instance for GitLab CI, then print the service-account token.
CI_PROJECT_PATH_SLUG=$1
CI_ENVIRONMENT_NAME=$2
GREEN='\033[0;32m'
NC='\033[0m'

usage() {
  echo "Usage: $0 CI_PROJECT_PATH_SLUG CI_ENVIRONMENT_NAME"
}

# base64 decodes with -d on Linux and -D on macOS; anything else falls through
# to --help so the error is visible.
base64_decode_key() {
  if [[ "$OSTYPE" == "linux"* ]]
  then
    echo "-d"
  elif [[ "$OSTYPE" == "darwin"* ]]
  then
    echo "-D"
  else
    echo "--help"
  fi
}

if [ -n "$CI_PROJECT_PATH_SLUG" ] && [ -n "$CI_ENVIRONMENT_NAME" ]
then
  echo -e "${GREEN}creating namespace for project${NC}"
  kubectl create namespace \
    "$CI_PROJECT_PATH_SLUG-$CI_ENVIRONMENT_NAME"
  echo

  echo -e "${GREEN}creating CI serviceaccount for project${NC}"
  kubectl create serviceaccount \
    --namespace "$CI_PROJECT_PATH_SLUG-$CI_ENVIRONMENT_NAME" \
    "$CI_PROJECT_PATH_SLUG-$CI_ENVIRONMENT_NAME"
  echo

  echo -e "${GREEN}creating CI role for project${NC}"
  cat << EOF | kubectl create --namespace "$CI_PROJECT_PATH_SLUG-$CI_ENVIRONMENT_NAME" -f -
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: $CI_PROJECT_PATH_SLUG-$CI_ENVIRONMENT_NAME
rules:
- apiGroups: ["", "extensions", "apps", "certmanager.k8s.io"]
  resources: ["*"]
  verbs: ["*"]
EOF
  echo

  echo -e "${GREEN}creating CI rolebinding for project${NC}"
  kubectl create rolebinding \
    --namespace "$CI_PROJECT_PATH_SLUG-$CI_ENVIRONMENT_NAME" \
    --serviceaccount "$CI_PROJECT_PATH_SLUG-$CI_ENVIRONMENT_NAME:$CI_PROJECT_PATH_SLUG-$CI_ENVIRONMENT_NAME" \
    --role "$CI_PROJECT_PATH_SLUG-$CI_ENVIRONMENT_NAME" \
    "$CI_PROJECT_PATH_SLUG-$CI_ENVIRONMENT_NAME"
  echo

  echo -e "${GREEN}installing Tiller for project's CI${NC}"
  helm init \
    --service-account "$CI_PROJECT_PATH_SLUG-$CI_ENVIRONMENT_NAME" \
    --tiller-namespace "$CI_PROJECT_PATH_SLUG-$CI_ENVIRONMENT_NAME" \
    --history-max 10 \
    --wait
  echo

  echo -e "${GREEN}access token for new CI user:${NC}"
  kubectl get secret \
    --namespace "$CI_PROJECT_PATH_SLUG-$CI_ENVIRONMENT_NAME" \
    "$( \
      kubectl get serviceaccount \
        --namespace "$CI_PROJECT_PATH_SLUG-$CI_ENVIRONMENT_NAME" \
        "$CI_PROJECT_PATH_SLUG-$CI_ENVIRONMENT_NAME" \
        -o jsonpath='{.secrets[0].name}' \
    )" \
    -o jsonpath='{.data.token}' | base64 $(base64_decode_key)
  echo
else
  usage
fi
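
A hypothetical invocation for a GitLab project example-group/example-app deployed to a staging environment (both names are placeholders) would be:

    ./setup.sh example-group-example-app staging

The script creates the example-group-example-app-staging namespace, a service account of the same name bound to a namespace-scoped Role, installs a dedicated Tiller with that service account, and prints the service-account token. That token, together with the cluster API URL and CA certificate, is typically what gets entered into the project's Kubernetes integration or CI variables in GitLab.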


@@ -0,0 +1,151 @@
# Valid bootstrap options (required): ubuntu, coreos, centos, none
bootstrap_os: none
## Flannel
flannel_backend_type: "host-gw"
flannel_interface_regexp: '172\\.20\\.100\\.\\d{1,3}'
###flannel_interface: "eth0"
# Directory where etcd data is stored
etcd_data_dir: /var/lib/etcd
# Directory where the binaries will be installed
bin_dir: /usr/local/bin
## The access_ip variable is used to define how other nodes should access
## the node. This is used in flannel to allow other flannel nodes to see
## this node, for example. The access_ip is really useful in AWS and Google
## environments where the nodes are accessed remotely by the "public" IP,
## but don't know about that address themselves.
#access_ip: 1.1.1.1
### LOADBALANCING AND ACCESS MODES
## Enable multiaccess to configure etcd clients to access all of the etcd members directly
## as the "http://hostX:port, http://hostY:port, ..." and ignore the proxy loadbalancers.
## This may be the case if clients support and loadbalance multiple etcd servers natively.
etcd_multiaccess: true
### ETCD: disable peer client cert authentication.
# This affects ETCD_PEER_CLIENT_CERT_AUTH variable
#etcd_peer_client_auth: true
## External LB example config
## apiserver_loadbalancer_domain_name: "elb.some.domain"
#loadbalancer_apiserver:
# address: 1.2.3.4
# port: 1234
## Internal loadbalancers for apiservers
#loadbalancer_apiserver_localhost: true
## Local loadbalancer should use this port instead, if defined.
## Defaults to kube_apiserver_port (6443)
#nginx_kube_apiserver_port: 8443
### OTHER OPTIONAL VARIABLES
## For some things, kubelet needs to load kernel modules. For example, dynamic kernel services are needed
## for mounting persistent volumes into containers (e.g. ceph and rbd backed volumes), and these modules
## may not be loaded by the preinstall kubernetes processes. Set to true to allow kubelet to load kernel
## modules.
kubelet_load_modules: true
## Internal network total size. This is the prefix of the
## entire network. Must be unused in your environment.
#kube_network_prefix: 18
## With calico it is possible to distribute routes with the border routers of the datacenter.
## Warning: enabling router peering will disable calico's default behavior ('node mesh').
## The subnets of each node will be distributed by the datacenter router.
#peer_with_router: false
## Upstream dns servers used by dnsmasq
#upstream_dns_servers:
# - 8.8.8.8
# - 8.8.4.4
## There are some changes specific to the cloud providers
## for instance we need to encapsulate packets with some network plugins
## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', or 'external'
## When openstack is used make sure to source in the openstack credentials
## like you would do when using nova-client before starting the playbook.
#cloud_provider:
## When azure is used, you need to also set the following variables.
## see docs/azure.md for details on how to get these values
#azure_tenant_id:
#azure_subscription_id:
#azure_aad_client_id:
#azure_aad_client_secret:
#azure_resource_group:
#azure_location:
#azure_subnet_name:
#azure_security_group_name:
#azure_vnet_name:
#azure_vnet_resource_group:
#azure_route_table_name:
## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461)
#openstack_blockstorage_version: "v1/v2/auto (default)"
## When OpenStack is used, if LBaaSv2 is available you can enable it with the following 2 variables.
#openstack_lbaas_enabled: True
#openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP"
## To enable automatic floating ip provisioning, specify a subnet.
#openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default"
## Override default LBaaS behavior
#openstack_lbaas_use_octavia: False
#openstack_lbaas_method: "ROUND_ROBIN"
#openstack_lbaas_provider: "haproxy"
#openstack_lbaas_create_monitor: "yes"
#openstack_lbaas_monitor_delay: "1m"
#openstack_lbaas_monitor_timeout: "30s"
#openstack_lbaas_monitor_max_retries: "3"
## Uncomment to enable experimental kubeadm deployment mode
#kubeadm_enabled: false
## Set these proxy values in order to update package manager and docker daemon to use proxies
#http_proxy: ""
#https_proxy: ""
## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy
#no_proxy: ""
## Uncomment this if you want to force overlay/overlay2 as docker storage driver
## Please note that overlay2 is only supported on newer kernels
docker_storage_options: -s overlay2
# Uncomment this if you have more than 3 nameservers, then we'll only use the first 3.
docker_dns_servers_strict: false
## Default packages to install within the cluster, e.g.:
#kpm_packages:
# - name: kube-system/grafana
## Certificate Management
## This setting determines whether certs are generated via scripts or whether a
## cluster of Hashicorp's Vault is started to issue certificates (using etcd
## as a backend). Options are "script" or "vault"
#cert_management: script
# Set to true to allow pre-checks to fail and continue deployment
#ignore_assert_errors: false
#etcd_heartbeat_interval: "100"
#etcd_election_timeout: "5000"
etcd_snapshot_count: "5000"
# -c Takes an integer between 0 and 3 or one of the strings none, realtime, best-effort or idle.
# -n Takes an integer between 0 (highest priority) and 7 (lowest priority)
etcd_ionice: "-c2 -n0"
## Etcd auto compaction retention for mvcc key value store in hours
#etcd_compaction_retention: 0
## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics.
#etcd_metrics: basic
## Etcd is restricted by default to 512M on systems under 4GB RAM; 512MB is not enough for much more than testing.
## Set this if your etcd nodes have less than 4GB but you want more RAM for etcd. Set to 0 for unrestricted RAM.
etcd_memory_limit: 0
# The read-only port for the Kubelet to serve on with no authentication/authorization. Set to 0 to disable.
kube_read_only_port: 10255
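
Because the kubelet read-only port is left enabled above, it can be probed without credentials once the cluster is up; a sketch using one of the node names from the inventory below:

    curl -s http://node-1.slurm.io:10255/pods | head -c 300

This is handy for monitoring agents, but it also exposes pod metadata unauthenticated on 10255, so the port should stay firewalled from outside the cluster network.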


@@ -0,0 +1,240 @@
# Kubernetes configuration dirs and system namespace.
# These are where all the additional config stuff goes
# that kubernetes normally puts in /srv/kubernetes.
# This puts them in a sane location and namespace.
# Editing these values will almost surely break something.
kube_config_dir: /etc/kubernetes
kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
kube_manifest_dir: "{{ kube_config_dir }}/manifests"
system_namespace: kube-system
# This is where all the cert scripts and certs will be located
kube_cert_dir: "{{ kube_config_dir }}/ssl"
# This is where all of the bearer tokens will be stored
kube_token_dir: "{{ kube_config_dir }}/tokens"
# This is where to save basic auth file
kube_users_dir: "{{ kube_config_dir }}/users"
kube_api_anonymous_auth: false
# Where the binaries will be downloaded.
# Note: ensure that you've enough disk space (about 1G)
local_release_dir: "/tmp/releases"
# Random shifts for retrying failed ops like pushing/downloading
retry_stagger: 5
# This is the group that the cert creation scripts chgrp the
# cert files to. Not really changeable...
kube_cert_group: kube-cert
# Cluster Loglevel configuration
kube_log_level: 1
# Users to create for basic auth in Kubernetes API via HTTP
# Optionally add groups for user
kube_api_pwd: "{{ lookup('password', inventory_dir + '/credentials/kube_user length=15 chars=ascii_letters,digits') }}"
kube_users:
  kube:
    pass: "{{kube_api_pwd}}"
    role: admin
    groups:
      - system:masters
## It is possible to activate / deactivate selected authentication methods (basic auth, static token auth)
kube_oidc_auth: false
kube_basic_auth: false
#kube_token_auth: false
## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/
## To use OpenID you have to additionally deploy an OpenID Provider (e.g. Dex, Keycloak, ...)
# kube_oidc_url: https:// ...
# kube_oidc_client_id: kubernetes
## Optional settings for OIDC
# kube_oidc_ca_file: {{ kube_cert_dir }}/ca.pem
# kube_oidc_username_claim: sub
# kube_oidc_groups_claim: groups
# Choose network plugin (cilium, calico, contiv, weave or flannel)
# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
kube_network_plugin: flannel
# weave's network password for encryption
# if null then no network encryption
# you can use --extra-vars to pass the password in command line
weave_password: EnterPasswordHere
# Weave uses consensus mode by default
# Enabling seed mode allows hosts to be added or removed dynamically
# https://www.weave.works/docs/net/latest/ipam/
weave_mode_seed: false
# These two variables are automatically changed by the weave role; do not change these values manually
# To reset values :
# weave_seed: uninitialized
# weave_peers: uninitialized
weave_seed: uninitialized
weave_peers: uninitialized
# Set the MTU of Weave (default 1376, Jumbo Frames: 8916)
weave_mtu: 1376
# Enable kubernetes network policies
enable_network_policy: false
# Kubernetes internal network for services, unused block of space.
kube_service_addresses: 10.0.0.0/16
# internal network. When used, it will assign IP
# addresses from this range to individual pods.
# This network must be unused in your network infrastructure!
kube_pods_subnet: 10.100.0.0/16
# internal network node size allocation (optional). This is the size allocated
# to each node on your network. With these defaults you should have
# room for 4096 nodes with 254 pods per node.
kube_network_node_prefix: 24
# The port the API Server will be listening on.
kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
kube_apiserver_port: 6443 # (https)
kube_apiserver_insecure_port: 8080 # (http)
# Set to 0 to disable insecure port - Requires RBAC in authorization_modes and kube_api_anonymous_auth: true
#kube_apiserver_insecure_port: 0 # (disabled)
# Kube-proxy proxyMode configuration.
# Can be ipvs, iptables
kube_proxy_mode: iptables
# DNS configuration.
# Kubernetes cluster name, also will be used as DNS domain
cluster_name: slurm.local
# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
ndots: 2
# Can be dnsmasq_kubedns, kubedns, manual or none
dns_mode: kubedns
# Set manual server if using a custom cluster DNS server
#manual_dns_server: 10.x.x.x
# Can be docker_dns, host_resolvconf or none
resolvconf_mode: docker_dns
# Deploy netchecker app to verify DNS resolve as an HTTP service
deploy_netchecker: false
# Ip address of the kubernetes skydns service
skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
dnsmasq_dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
dns_domain: "{{ cluster_name }}"
# Path used to store Docker data
docker_daemon_graph: "/var/lib/docker"
## A string of extra options to pass to the docker daemon.
## This string should be exactly as you wish it to appear.
## An obvious use case is allowing insecure-registry access
## to self hosted registries like so:
docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }} {{ docker_log_opts }}"
docker_bin_dir: "/usr/bin"
# Settings for containerized control plane (etcd/kubelet/secrets)
etcd_deployment_type: docker
kubelet_deployment_type: host
vault_deployment_type: docker
helm_deployment_type: host
# K8s image pull policy (imagePullPolicy)
k8s_image_pull_policy: IfNotPresent
# Kubernetes dashboard
# RBAC required. see docs/getting-started.md for access details.
dashboard_enabled: true
# Monitoring apps for k8s
efk_enabled: false
# Helm deployment
helm_enabled: false
# Istio deployment
istio_enabled: false
# Registry deployment
registry_enabled: false
# Local volume provisioner deployment
local_volume_provisioner_enabled: false
# local_volume_provisioner_namespace: "{{ system_namespace }}"
# local_volume_provisioner_base_dir: /mnt/disks
# local_volume_provisioner_mount_dir: /mnt/disks
# local_volume_provisioner_storage_class: local-storage
# CephFS provisioner deployment
cephfs_provisioner_enabled: false
# cephfs_provisioner_namespace: "{{ system_namespace }}"
# cephfs_provisioner_cluster: ceph
# cephfs_provisioner_monitors:
# - 172.24.0.1:6789
# - 172.24.0.2:6789
# - 172.24.0.3:6789
# cephfs_provisioner_admin_id: admin
# cephfs_provisioner_secret: secret
# cephfs_provisioner_storage_class: cephfs
# Nginx ingress controller deployment
ingress_nginx_enabled: true
ingress_nginx_host_network: true
ingress_nginx_nodeselector:
  node-role.kubernetes.io/ingress: "true"
# ingress_nginx_namespace: "ingress-nginx"
# ingress_nginx_insecure_port: 80
# ingress_nginx_secure_port: 443
ingress_nginx_configmap:
  server-tokens: "False"
  proxy-body-size: "2G"
  proxy-buffer-size: "16k"
  worker-shutdown-timeout: "180"
  # map-hash-bucket-size: "128"
  # ssl-protocols: "SSLv2"
# ingress_nginx_configmap_tcp_services:
# 9000: "default/example-go:8080"
# ingress_nginx_configmap_udp_services:
# 53: "kube-system/kube-dns:53"
# Add Persistent Volumes Storage Class for the corresponding cloud provider (only OpenStack is supported for now)
persistent_volumes_enabled: false
# Make a copy of kubeconfig on the host that runs Ansible in GITDIR/artifacts
kubeconfig_localhost: true
# Download kubectl onto the host that runs Ansible in GITDIR/artifacts
kubectl_localhost: true
# dnsmasq
# dnsmasq_upstream_dns_servers:
# - /resolvethiszone.with/10.0.4.250
# - 8.8.8.8
# Enable creation of QoS cgroup hierarchy, if true top level QoS and pod cgroups are created. (default true)
# kubelet_cgroups_per_qos: true
# A comma separated list of levels of node allocatable enforcement to be enforced by kubelet.
# Acceptable options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "".
# kubelet_enforce_node_allocatable: pods
## Supplementary addresses that can be added in kubernetes ssl keys.
## That can be useful for example to setup a keepalived virtual IP
# supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3]
## Running on top of openstack vms with cinder enabled may lead to unschedulable pods due to NoVolumeZoneConflict restriction in kube-scheduler.
## See https://github.com/kubernetes-incubator/kubespray/issues/2141
## Set this variable to true to get rid of this issue
volume_cross_zone_attachment: false
# Added support for kubelet webhook authentication/authorization #2184
# https://github.com/kubernetes-incubator/kubespray/pull/2184
# https://factory.southbridge.io/issues/262494
kubelet_authentication_token_webhook: true
kubelet_authorization_mode_webhook: true


@@ -0,0 +1,2 @@
node_labels:
  node-role.kubernetes.io/ingress: "true"
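
This label is what ingress_nginx_nodeselector above matches on, so the ingress controller pods should land only on the labeled host. A quick check after deployment, assuming kubespray's default ingress-nginx namespace:

    kubectl get nodes -l node-role.kubernetes.io/ingress=true
    kubectl -n ingress-nginx get pods -o wide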


@@ -0,0 +1,31 @@
# ## Configure 'ip' variable to bind kubernetes services on a
# ## different ip than the default iface
master-1.slurm.io ansible_ssh_host=172.20.100.2 ip=172.20.100.2
master-2.slurm.io ansible_ssh_host=172.20.100.3 ip=172.20.100.3
master-3.slurm.io ansible_ssh_host=172.20.100.4 ip=172.20.100.4
ingress-1.slurm.io ansible_ssh_host=172.20.100.5 ip=172.20.100.5
node-1.slurm.io ansible_ssh_host=172.20.100.6 ip=172.20.100.6
node-2.slurm.io ansible_ssh_host=172.20.100.7 ip=172.20.100.7
[kube-master]
master-1.slurm.io
master-2.slurm.io
master-3.slurm.io
[etcd]
master-1.slurm.io
master-2.slurm.io
master-3.slurm.io
[kube-node]
node-1.slurm.io
node-2.slurm.io
ingress-1.slurm.io
[kube-ingress]
ingress-1.slurm.io
[k8s-cluster:children]
kube-node
kube-master
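
With this inventory and the group_vars above, the cluster itself is brought up with kubespray's standard entry point; roughly (the exact path to cluster.yml and the admin user depend on how the kubespray checkout is laid out, which is not part of this commit):

    ansible-playbook -i inventory/hosts -u <adminname> -k -b --diff cluster.yml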