diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..785088692de3e03492b1faf31598fbfcc9b219c5
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,140 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+.idea
\ No newline at end of file
diff --git a/dsms_deploy/Makefile b/dsms_deploy/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..be8609f6d1723c15289967c7cbbd31a5437c8606
--- /dev/null
+++ b/dsms_deploy/Makefile
@@ -0,0 +1,39 @@
+RPM_NAME=dsms-deploy
+RPM_VERSION=1.1
+RPM_RELEASE=2.0.0
+RPM_ARCH=x86_64
+RPM_BUILD_DIR=$(shell pwd)/rpmbuild
+RPM_SPEC_FILE=$(RPM_BUILD_DIR)/SPECS/$(RPM_NAME).spec
+
+.PHONY: clean build
+
+clean:
+	rm -rf target
+	rm -rf dist
+	rm -rf $(RPM_BUILD_DIR)
+build: clean
+	mkdir -p $(RPM_BUILD_DIR)/{BUILD,BUILDROOT,RPMS,SOURCES,SPECS,SRPMS}
+	mkdir -p $(RPM_BUILD_DIR)/BUILD/$(RPM_NAME)/
+	pyinstaller --onefile dsms-deploy.py
+	cp dist/dsms-deploy $(RPM_BUILD_DIR)/BUILD/$(RPM_NAME)/
+	cp config/cluster.conf $(RPM_BUILD_DIR)/BUILD/$(RPM_NAME)/
+	cp config/add-node.conf $(RPM_BUILD_DIR)/BUILD/$(RPM_NAME)/
+	cp config/depend_list $(RPM_BUILD_DIR)/BUILD/$(RPM_NAME)/
+	cp config/whl/* $(RPM_BUILD_DIR)/BUILD/$(RPM_NAME)/
+
+	sed -e "s/@RPM_NAME@/$(RPM_NAME)/g" \
+	    -e "s/@RPM_VERSION@/$(RPM_VERSION)/g" \
+	    -e "s/@RPM_RELEASE@/$(RPM_RELEASE)/g" \
+	    -e "s/@RPM_ARCH@/$(RPM_ARCH)/g" \
+	    -e "s|@RPM_BUILD_DIR@|$(RPM_BUILD_DIR)|g" \
+	    config/$(RPM_NAME).spec > $(RPM_SPEC_FILE)
+
+	rpmbuild -bb $(RPM_SPEC_FILE)
+
+	mkdir target
+
+	find $(RPM_BUILD_DIR)/RPMS -type f -name "*.rpm" -exec cp {} ./target \;
+
+	rm -rf $(RPM_BUILD_DIR)
+
+	rm -rf dist
\ No newline at end of file
diff --git a/dsms_deploy/config/add-node.conf b/dsms_deploy/config/add-node.conf
new file mode 100644
index 0000000000000000000000000000000000000000..7579c7028ada97762a237399d53fba534689deff
--- /dev/null
+++ b/dsms_deploy/config/add-node.conf
@@ -0,0 +1,27 @@
+#Configuration file for adding a new node to an existing cluster
+
+#NEW_NODE1
+#cluster new node ip
+#Requires Configuration
+NEW_NODE1_IP=
+#cluster new node hostname
+#Requires Configuration
+NEW_NODE1_HOSTNAME=
+#cluster new node ssh port
+#Requires Configuration
+NEW_NODE1_SSH_PORT=22
+#cluster new node root password
+#Requires Configuration
+NEW_NODE1_ROOT_PASSWD=
+#A daemon that maintains a map of the state of the cluster.
+#Requires Configuration
+NEW_NODE1_MON=yes
+
+
+#Customize yum repository public key file
+#supports a local GPG key file path or a remote HTTP GPG key URL
+#Optional Configuration
+REPO_KEY_FILE=
+#Customize yum repository URL
+#Optional Configuration
+REPO_URL=
\ No newline at end of file
diff --git a/dsms_deploy/config/cluster.conf b/dsms_deploy/config/cluster.conf
new file mode 100644
index 0000000000000000000000000000000000000000..9f1d8c74c33fb3f88f13ed6e3acaf04a53fd21f5
--- /dev/null
+++ b/dsms_deploy/config/cluster.conf
@@ -0,0 +1,70 @@
+#Deployment config file for the new cluster
+
+#NODE1
+#cluster node ip; dsms-deploy uses ssh to connect
+#required config
+NODE1_IP=
+#cluster node hostname; dsms-deploy will change the node hostname
+#required config
+NODE1_HOSTNAME=
+#cluster node ssh port
+#required config
+NODE1_SSH_PORT=22
+#cluster node root password
+#required config
+NODE1_ROOT_PASSWD=
+#A daemon that maintains a map of the state of the cluster.
+#required config
+NODE1_MON=yes
+
+#NODE2
+#cluster node ip; dsms-deploy uses ssh to connect
+#required config
+NODE2_IP=
+#cluster node hostname; dsms-deploy will change the node hostname
+#required config
+NODE2_HOSTNAME=
+#cluster node ssh port
+#required config
+NODE2_SSH_PORT=22
+#cluster node root password
+#required config
+NODE2_ROOT_PASSWD=
+#A daemon that maintains a map of the state of the cluster.
+#required config
+NODE2_MON=yes
+
+
+#NODE3
+#cluster node ip; dsms-deploy uses ssh to connect
+#required config
+NODE3_IP=
+#cluster node hostname; dsms-deploy will change the node hostname
+#required config
+NODE3_HOSTNAME=
+#cluster node ssh port
+#required config
+NODE3_SSH_PORT=22
+#cluster node root password
+#required config
+NODE3_ROOT_PASSWD=
+#A daemon that maintains a map of the state of the cluster.
+#required config
+NODE3_MON=yes
+
+#CLUSTER NETWORK CONFIG
+#use CIDR notation for subnets (e.g., 10.0.0.0/24).
+#cluster's front-side public network for service access
+#required config
+PUBLIC_NETWORK=
+#cluster's back-side network for OSD object replication, heartbeats, backfilling, recovery, etc.
+#required config
+CLUSTER_NETWORK=
+
+#Customize yum repository public key file
+#supports a local GPG key file path or a remote HTTP GPG key URL
+#Optional config
+REPO_KEY_FILE=
+#Customize yum repository URL
+#Optional config
+REPO_URL=
\ No newline at end of file
diff --git a/dsms_deploy/config/depend_list b/dsms_deploy/config/depend_list
new file mode 100644
index 0000000000000000000000000000000000000000..1778da1a33e4fe9176071de6fe0603c92c052d56
--- /dev/null
+++ b/dsms_deploy/config/depend_list
@@ -0,0 +1,80 @@
+depend=abseil-cpp-20200923.3-1.el8.x86_64.rpm
+depend=gperftools-libs-2.7-9.el8.x86_64.rpm
+depend=grpc-data-1.34.1-8.0.1.an8.noarch.rpm
+depend=leveldb-1.22-1.el8.x86_64.rpm
+depend=leveldb-devel-1.22-1.el8.x86_64.rpm
+depend=liboath-2.6.2-3.el8.x86_64.rpm
+depend=libunwind-1.3.1-3.el8.x86_64.rpm
+depend=platform-python-devel-3.6.8-45.0.1.an8.x86_64.rpm
+depend=protobuf-3.14.0-6.an8.x86_64.rpm
+depend=python3-apipkg-1.5-6.el8.noarch.rpm
+depend=python3-beautifulsoup4-4.6.3-2.el8.1.noarch.rpm
+depend=python3-bcrypt-3.1.6-2.el8.1.x86_64.rpm
+depend=python3-cheroot-8.5.2-1.el8.noarch.rpm
+depend=python3-cherrypy-18.4.0-1.el8.noarch.rpm
+depend=python3-execnet-1.7.1-1.el8.noarch.rpm
+depend=python3-grpcio-1.34.1-8.0.1.an8.x86_64.rpm
+depend=python3-jaraco-6.2-6.el8.noarch.rpm
+depend=python3-jaraco-functools-2.0-4.el8.noarch.rpm
+depend=python3-logutils-0.3.5-11.el8.noarch.rpm
+depend=python3-more-itertools-7.2.0-3.el8.noarch.rpm
+depend=python3-pecan-1.3.2-9.el8.noarch.rpm
+depend=python3-portend-2.6-1.el8.noarch.rpm
+depend=python3-remoto-1.2.1-1.el8.noarch.rpm
+depend=python3-repoze-lru-0.7-6.el8.noarch.rpm
+depend=python3-routes-2.4.1-12.el8.noarch.rpm
+depend=python3-simplegeneric-0.8.1-17.el8.noarch.rpm
+depend=python3-singledispatch-3.4.0.3-18.el8.noarch.rpm
+depend=python3-tempora-1.14.1-5.el8.noarch.rpm
+depend=python3-trustme-0.6.0-4.el8.noarch.rpm
+depend=python3-waitress-1.2.1-2.el8.1.noarch.rpm
+depend=python3-webob-1.8.5-1.el8.1.noarch.rpm
+depend=python3-webtest-2.0.33-1.el8.noarch.rpm
+depend=python3-zc-lockfile-2.0-2.el8.noarch.rpm
+depend=re2-20190801-1.el8.x86_64.rpm
+depend=resource-agents-4.9.0-35.el8.x86_64.rpm
+whl=ceph-deploy-2.0.1.tar.gz
+ceph_rpm=ceph-2.0.0_anolis-0.an8.x86_64.rpm
+ceph_rpm=cephadm-2.0.0_anolis-0.an8.noarch.rpm
+ceph_rpm=ceph-base-2.0.0_anolis-0.an8.x86_64.rpm
+ceph_rpm=ceph-common-2.0.0_anolis-0.an8.x86_64.rpm
+ceph_rpm=ceph-fuse-2.0.0_anolis-0.an8.x86_64.rpm
+ceph_rpm=ceph-grafana-dashboards-2.0.0_anolis-0.an8.noarch.rpm
+ceph_rpm=ceph-immutable-object-cache-2.0.0_anolis-0.an8.x86_64.rpm
+ceph_rpm=ceph-mds-2.0.0_anolis-0.an8.x86_64.rpm
+ceph_rpm=ceph-mgr-2.0.0_anolis-0.an8.x86_64.rpm
+ceph_rpm=ceph-mgr-cephadm-2.0.0_anolis-0.an8.noarch.rpm
+ceph_rpm=ceph-mgr-dashboard-2.0.0_anolis-0.an8.x86_64.rpm
+ceph_rpm=ceph-mgr-diskprediction-cloud-2.0.0_anolis-0.an8.noarch.rpm
+ceph_rpm=ceph-mgr-diskprediction-local-2.0.0_anolis-0.an8.noarch.rpm
+ceph_rpm=ceph-mgr-modules-core-2.0.0_anolis-0.an8.noarch.rpm
+ceph_rpm=ceph-mon-2.0.0_anolis-0.an8.x86_64.rpm
+ceph_rpm=ceph-osd-2.0.0_anolis-0.an8.x86_64.rpm
+ceph_rpm=ceph-prometheus-alerts-2.0.0_anolis-0.an8.noarch.rpm
+ceph_rpm=ceph-radosgw-2.0.0_anolis-0.an8.x86_64.rpm
+ceph_rpm=ceph-resource-agents-2.0.0_anolis-0.an8.x86_64.rpm
+ceph_rpm=ceph-selinux-2.0.0_anolis-0.an8.x86_64.rpm
+ceph_rpm=libcephfs2-2.0.0_anolis-0.an8.x86_64.rpm
+ceph_rpm=libcephfs-devel-2.0.0_anolis-0.an8.x86_64.rpm
+ceph_rpm=librados2-2.0.0_anolis-0.an8.x86_64.rpm
+ceph_rpm=librados-devel-2.0.0_anolis-0.an8.x86_64.rpm
+ceph_rpm=libradospp-devel-2.0.0_anolis-0.an8.x86_64.rpm
+ceph_rpm=libradosstriper1-2.0.0_anolis-0.an8.x86_64.rpm
+ceph_rpm=libradosstriper-devel-2.0.0_anolis-0.an8.x86_64.rpm
+ceph_rpm=librbd1-2.0.0_anolis-0.an8.x86_64.rpm
+ceph_rpm=librbd-devel-2.0.0_anolis-0.an8.x86_64.rpm
+ceph_rpm=librgw2-2.0.0_anolis-0.an8.x86_64.rpm
+ceph_rpm=librgw-devel-2.0.0_anolis-0.an8.x86_64.rpm
+ceph_rpm=python3-ceph-argparse-2.0.0_anolis-0.an8.x86_64.rpm
+ceph_rpm=python3-ceph-common-2.0.0_anolis-0.an8.x86_64.rpm
+ceph_rpm=python3-cephfs-2.0.0_anolis-0.an8.x86_64.rpm
+ceph_rpm=python3-rados-2.0.0_anolis-0.an8.x86_64.rpm
+ceph_rpm=python3-rbd-2.0.0_anolis-0.an8.x86_64.rpm
+ceph_rpm=python3-rgw-2.0.0_anolis-0.an8.x86_64.rpm
+ceph_rpm=rados-objclass-devel-2.0.0_anolis-0.an8.x86_64.rpm
+ceph_rpm=rbd-fuse-2.0.0_anolis-0.an8.x86_64.rpm
+ceph_rpm=rbd-mirror-2.0.0_anolis-0.an8.x86_64.rpm
+ceph_rpm=rbd-nbd-2.0.0_anolis-0.an8.x86_64.rpm
+ceph_rpm=dsms-prometheus-2.32.1-2.el8.x86_64.rpm
+ceph_rpm=dsms-prometheus-alertmanager-0.23.0-7.el8.x86_64.rpm
+ceph_rpm=dsms-prometheus-node-exporter-1.3.1-4.el8.x86_64.rpm
\ No newline at end of file
diff --git a/dsms_deploy/config/dsms-deploy.spec b/dsms_deploy/config/dsms-deploy.spec
new file mode 100644
index 0000000000000000000000000000000000000000..88baa7f461dbda802ea9a17046cfec67b44d73cc
--- /dev/null
+++ b/dsms_deploy/config/dsms-deploy.spec
@@ -0,0 +1,42 @@
+%define _topdir @RPM_BUILD_DIR@
+%define name @RPM_NAME@
+%define version @RPM_VERSION@
+%define release @RPM_RELEASE@
+%define arch @RPM_ARCH@
+
+Name: %{name}
+Version: %{version}
+Release: %{release}
+Summary: DSMS STORAGE components, providing distributed storage.
+License: MIT
+BuildArch: %{arch}
+
+Requires: python38
+
+%description
+STORAGE components, based on customized open-source distributed storage.
+%install
+mkdir -p %{buildroot}/etc/dsms/dsms-deploy/
+cp dsms-deploy/*.conf %{buildroot}/etc/dsms/dsms-deploy/
+cp dsms-deploy/depend_list %{buildroot}/etc/dsms/dsms-deploy/
+cp dsms-deploy/dsms-deploy %{buildroot}/etc/dsms/dsms-deploy/
+cp dsms-deploy/*.tar.gz %{buildroot}/etc/dsms/dsms-deploy/
+
+%post
+
+
+%posttrans
+mv /etc/dsms/dsms-deploy/dsms-deploy /usr/local/bin/dsms-deploy
+
+
+
+%preun
+
+%postun
+rm -rf /usr/local/bin/dsms-deploy
+
+
+%files
+/etc/*
+
+%changelog
\ No newline at end of file
diff --git a/dsms_deploy/config/whl/ceph-deploy-2.0.1.tar.gz b/dsms_deploy/config/whl/ceph-deploy-2.0.1.tar.gz
new file mode 100644
index 0000000000000000000000000000000000000000..bc0ef86f7b029c24af5db83c112ba546d7db5196
Binary files /dev/null and b/dsms_deploy/config/whl/ceph-deploy-2.0.1.tar.gz differ
diff --git a/dsms_deploy/dsms-deploy.py b/dsms_deploy/dsms-deploy.py
new file mode 100755
index 0000000000000000000000000000000000000000..53774327f3d9e2da10950a05f967f58b2bbc6111
--- /dev/null
+++ b/dsms_deploy/dsms-deploy.py
@@ -0,0 +1,784 @@
+#!/usr/bin/python3
+import argparse
+import io
+import ipaddress
+import json
+import logging
+import os
+import sys
+import time
+from logging.config import dictConfig
+
+import paramiko
+
+try:
+    from typing import Dict, List, Tuple, Optional, Union, Any, NoReturn, Callable, IO
+except ImportError:
+    pass
+
+VERSION = '1.0.0'
+SSH_TIMEOUT = 10
+DEPLOY_USERNAME = 'root'
+DEPLOY_PACKAGE = '/etc/dsms/dsms-deploy'
+LOG_DIR = '/var/log/dsms/dsms-deploy'
+CLUSTER_CONF = 'cluster.conf'
+ADD_NODE_CONF = 'add-node.conf'
+DEPEND_LIST = 'depend_list'
+LEADER_NODE = 'NODE1'
+SSH_PATH = '/root/.ssh'
+
+INIT_DIR = '/home/my-cluster'
+
+update_hosts = """
+#!/bin/bash
+IP=$1
+HOSTNAME=$2
+if [ ! -n "$IP" ];then
+    echo "Please fill in parameter 1 as the IP address!"
+    exit 1
+fi
+if [ ! -n "$HOSTNAME" ];then
+    echo "Please fill in parameter 2 as HOSTNAME!"
+    exit 1
+fi
+ITEM="$IP $HOSTNAME"
+HOSTS="/etc/hosts"
+#update /etc/hosts file
+add_hosts=true
+while read line || [[ -n ${line} ]]; do
+    if [ "$ITEM" == "$line" ];then
+        add_hosts=false
+        break
+    else
+        i=1
+        for host in $line;do
+            if [ $i -eq 2 ];then
+                if [ $host == $HOSTNAME ];then
+                    #Delete any hosts entry that has the same hostname as the current host but a different IP
+                    sed -i "/${line}/d" $HOSTS
+                fi
+            fi
+            i=`expr $i + 1`
+        done
+        continue
+    fi
+done < $HOSTS
+
+if [ $add_hosts == true ];then
+    echo "$ITEM" >> $HOSTS
+fi
+"""
+
+# Log and console output config
+logging_config = {
+    'version': 1,
+    'disable_existing_loggers': True,
+    'formatters': {
+        'paramiko': {
+            'format': '%(asctime)s %(thread)x %(levelname)s %(message)s'
+        },
+        'dsms-storage': {
+            'format': '%(asctime)s %(thread)x %(levelname)s DSMS-DEPLOY: %(message)s'
+        },
+        'console': {
+            'format': '%(asctime)s %(message)s'
+        },
+    },
+    'handlers': {
+        'console': {
+            'level': 'INFO',
+            'class': 'logging.StreamHandler',
+            'formatter': 'console',
+        },
+        'log_file': {
+            'level': 'DEBUG',
+            'class': 'logging.handlers.RotatingFileHandler',
+            'formatter': 'dsms-storage',
+            'filename': '%s/dsms-deploy.log' % LOG_DIR,
+            'maxBytes': 1024000,
+            'backupCount': 1,
+        },
+        'paramiko_log_file': {
+            'level': 'INFO',
+            'class': 'logging.handlers.RotatingFileHandler',
+            'formatter': 'paramiko',
+            'filename': '%s/dsms-deploy.log' % LOG_DIR,
+            'maxBytes': 1024000,
+            'backupCount': 1,
+        }
+    },
+    'loggers': {
+        '': {
+            'level': 'INFO',
+            'handlers': ['console', 'log_file'],
+        },
+        'paramiko': {
+            'level': 'INFO',
+            'handlers': ['paramiko_log_file'],
+            'propagate': False,
+        }
+    }
+}
+
+
+class termcolor:
+    yellow = '\033[93m'
+    red = '\033[31m'
+    end = '\033[0m'
+
+
+class DsmsDeployException(Exception):
+    """
+    Custom exception class
+    """
+
+    def __init__(self, message):
+        self.message = message
+
+    def __str__(self):
+        return '{}{}{}'.format(termcolor.red, f"DsmsDeployException: {self.message}", termcolor.end)
+
+
+class Node:
+    def __init__(self, ip, hostname, ssh_port, root_passwd, mon, repo_key_file,
+                 repo_url, public_network, cluster_network, leader):
+        self.ip = ip
+        self.hostname = hostname
+        self.ssh_port = ssh_port
+        self.root_passwd = root_passwd
+        self.mon = mon
+        self.repo_key_file = repo_key_file
+        self.repo_url = repo_url
+        self.public_network = public_network
+        self.cluster_network = cluster_network
+        self.leader = leader
+
+
+def dsms_require_root() -> None:
+    """Exit if the process is not running as root."""
+    if os.geteuid() != 0:
+        sys.stderr.write('ERROR: dsms-deploy should be run as root\n')
+        sys.exit(1)
+
+
+def find_nth(findstr, substr, n):
+    """Return the index of the n-th occurrence of substr in findstr, or -1."""
+    start = findstr.find(substr)
+    while start >= 0 and n > 1:
+        start = findstr.find(substr, start + len(substr))
+        n -= 1
+    return start
+
+
+def valid_node(nodes, node_type):
+    for key, value in nodes.items():
+        for field in ['ip', 'hostname', 'ssh_port', 'root_passwd']:
+            if not getattr(value, field, None):
+                raise DsmsDeployException(f'server {key} {field} is a required field')
+        if node_type == 'new':
+            for field in ['public_network', 'cluster_network']:
+                if not getattr(value, field, None):
+                    raise DsmsDeployException(f'init server {key} {field} is a required field')
+        try:
+            # Verify that the IP address format is correct
+            ipaddress.ip_address(value.ip)
+        except ValueError:
+            raise DsmsDeployException(f'server {key} IP address is illegal')
+
+        try:
+            # Verify that the SSH port format is correct
+            port = int(value.ssh_port)
+            if not 1 <= port <= 65535:
+                raise ValueError
+        except ValueError:
+            raise DsmsDeployException(f'server {key} SSH port {value.ssh_port} is illegal')
+
+        # Verify SSH connectivity
+        try:
+            with paramiko.SSHClient() as ssh:
+                ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+                # paramiko expects an int port, so convert the configured string
+                ssh.connect(value.ip, port=int(value.ssh_port), username=DEPLOY_USERNAME, password=value.root_passwd)
+                try:
+                    with ssh.open_sftp() as sftp:
+                        sftp.listdir('.')
+                except Exception:
+                    raise DsmsDeployException(f'server {key} SFTP config error, please check /etc/ssh/sshd_config on the server')
+        except DsmsDeployException as e:
+            raise DsmsDeployException(e.message)
+        except paramiko.AuthenticationException:
+            raise DsmsDeployException(f'server {key} SSH authentication failed, please check the root password')
+        except Exception:
+            raise DsmsDeployException(f'server {key} SSH connection failed, please check the SSH port')
+
+
+def read_cluster_conf(conf_path):
+    nodes = {}
+    with open(conf_path) as f:
+        for line in f:
+            if line.strip() and not line.startswith("#") and line.startswith("NODE"):
+                # split on the first '=' only, so values may themselves contain '='
+                key, value = line.strip().split('=', 1)
+                if not value:
+                    continue
+                node_name, attr_name = key.split('_', 1)
+                node = nodes.get(node_name)
+                if not node:
+                    node = Node('', '', '', '', '', '', '', '', '', '')
+                    nodes[node_name] = node
+                setattr(node, attr_name.lower(), value)
+                if node_name == LEADER_NODE:
+                    setattr(node, 'leader', True)
+            if line.strip() and not line.startswith("#") and not line.startswith("NODE"):
+                key, value = line.strip().split('=', 1)
+                if not value:
+                    continue
+                for node in nodes.values():
+                    setattr(node, key.lower(), value)
+    valid_node(nodes, 'new')
+    return nodes
+
+
+def read_add_conf(conf_path):
+    nodes = {}
+    with open(conf_path) as f:
+        for line in f:
+            if line.strip() and not line.startswith("#") and line.startswith("NEW_NODE"):
+                key, value = line.strip().split('=', 1)
+                if not value:
+                    continue
+                node_name = key[:find_nth(key, '_', 2)]
+                attr_name = key[find_nth(key, '_', 2) + 1:]
+                node = nodes.get(node_name)
+                if not node:
+                    node = Node('', '', '', '', '', '', '', '', '', '')
+                    nodes[node_name] = node
+                setattr(node, attr_name.lower(), value)
+            if line.strip() and not line.startswith("#") and line.startswith("REPO"):
+                key, value = line.strip().split('=', 1)
+                if not value:
+                    continue
+                for node in nodes.values():
+                    setattr(node, key.lower(), value)
+    valid_node(nodes, 'add')
+    return nodes
+
+
+def run_ssh_command(server, username, password, command):
+    logger.info(f"{server} execute start: `{command}`")
+    ssh_client = paramiko.SSHClient()
+    ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+    ssh_client.connect(hostname=server, username=username, password=password,
+                       timeout=SSH_TIMEOUT, allow_agent=False,
+                       look_for_keys=False)
+
+    stdin, stdout, stderr = ssh_client.exec_command(command)
+    # Create a file-like buffer for the captured output
+    output_file = io.StringIO()
+
+    # Print the output in real time and also write it to the buffer
+    for line in stdout:
+        logger.info(line.strip())
+        output_file.write(line)
+    for line in stderr:
+        logger.info(line.strip())
+        output_file.write(line)
+
+    exit_code = stdout.channel.recv_exit_status()
+    # Save the captured output to a variable
+    output = output_file.getvalue()
+    ssh_client.close()
+
+    if exit_code == 0:
+        logger.info(f"{server} execute end: `{command}` success")
+        return output
+    else:
+        logger.error('{}{}{}'.format(termcolor.red, f"{server} execute end: `{command}` failed", termcolor.end))
+        raise DsmsDeployException(output)
+
+
+def run_ftp_command(local_file, server, username, password, remote_dir,
+                    extract_command=None):
+    """
+    Copy the local file to the remote server and execute an extract command
+
+    :param local_file: local file path
+    :param server: remote server address
+    :param username: remote server login username
+    :param password: remote server login password
+    :param remote_dir: remote destination directory
+    :param extract_command: extract command, optional parameter
+    :return: command execution result
+    """
+
+    # create ssh client
+    client = paramiko.SSHClient()
+    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+    client.connect(server, username=username, password=password,
+                   allow_agent=False, look_for_keys=False)
+
+    # create sftp client
+    sftp = client.open_sftp()
+
+    # put local file to remote server
+    remote_file = os.path.join(remote_dir, os.path.basename(local_file))
+    sftp.put(local_file, remote_file)
+
+    # close sftp client
+    sftp.close()
+
+    # execute extract command
+    if extract_command:
+        command = f'{extract_command} {remote_file} -C {remote_dir}'
+        stdin, stdout, stderr = client.exec_command(command)
+        # Create a file-like buffer for the captured output
+        output_file = io.StringIO()
+
+        # Print the output in real time and also write it to the buffer;
+        # stderr is fully drained here, so a later stderr.read() would be empty
+        for line in stdout:
+            logger.info(line.strip())
+            output_file.write(line)
+        for line in stderr:
+            logger.info(line.strip())
+            output_file.write(line)
+
+        exit_code = stdout.channel.recv_exit_status()
+        # Save the captured output to a variable
+        output = output_file.getvalue()
+        client.close()
+
+        if exit_code == 0:
+            logger.info(f"{server} execute: {command} success")
+            return output
+        else:
+            logger.error('{}{}{}'.format(termcolor.red, f"{server} execute: {command} failed", termcolor.end))
+            raise DsmsDeployException(f"{server} execute: {command} failed")
+
+
+def generate_remote_ssh_key(ssh_client):
+    """
+    Generate an SSH key pair on the remote server
+
+    :param ssh_client: remote server ssh client
+    :return: remote server public key
+    """
+
+    # check if the public key already exists on the remote server
+    _, stdout, _ = ssh_client.exec_command(f'ls {SSH_PATH}/id_rsa.pub')
+    public_key = stdout.read().decode().strip()
+
+    if not public_key:
+        # if there is no public key, a new key pair is generated
+        _, stdout, stderr = ssh_client.exec_command(
+            f'ssh-keygen -t rsa -N "" -f {SSH_PATH}/id_rsa')
+
+        # wait for the key to be generated
+        while not ssh_key_generated(ssh_client):
+            time.sleep(1)
+
+    # read back the (existing or newly generated) public key and return it
+    _, stdout, _ = ssh_client.exec_command(f'cat {SSH_PATH}/id_rsa.pub')
+    public_key = stdout.read().decode().strip()
+    return public_key
+
+
+def ssh_key_generated(ssh_client):
+    """
+    Check whether the SSH key is generated
+
+    :param ssh_client: remote server ssh client
+    :return: True if complete, False if not yet
+    """
+    _, stdout, _ = ssh_client.exec_command(f'ls {SSH_PATH}/id_rsa.pub')
+    public_key = stdout.read().decode().strip()
+    return bool(public_key)
+
+
+def setup_ssh_trust(nodes):
+    for i, node in enumerate(nodes.values()):
+        try:
+            ssh_client = paramiko.SSHClient()
+            ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+            ssh_client.connect(hostname=node.ip, username=DEPLOY_USERNAME, password=node.root_passwd, allow_agent=False,
+                               look_for_keys=False)
+
+            # Check that the id_rsa.pub file exists; if so, read the public key directly
+            public_key = generate_remote_ssh_key(ssh_client)
+            if public_key:
+                logger.info(f'remote server {node.hostname}: public key fetched successfully')
+            else:
+                raise DsmsDeployException(f"remote server {node.hostname}: generating remote ssh key failed")
+
+            # copy this node's public key to the other nodes' authorized_keys files
+            for j, other_node in enumerate(nodes.values()):
+                try:
+                    other_ssh = paramiko.SSHClient()
+                    other_ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+                    other_ssh.connect(other_node.ip, username=DEPLOY_USERNAME,
+                                      password=other_node.root_passwd,
+                                      allow_agent=False,
+                                      look_for_keys=False)
+                    sftp = other_ssh.open_sftp()
+                    # Check if remote file exists
+                    try:
+                        sftp.stat(f'{SSH_PATH}/authorized_keys')
+                    except FileNotFoundError:
+                        other_ssh.exec_command(f'mkdir -p {SSH_PATH}')
+                        other_ssh.exec_command(f'touch {SSH_PATH}/authorized_keys')
+                    authorized_keys = sftp.file(f'{SSH_PATH}/authorized_keys', 'a+')
+                    authorized_keys.seek(0)
+                    keys = authorized_keys.read().decode().splitlines()
+                    # verify whether it has been added first
+                    if public_key not in keys:
+                        authorized_keys.write(public_key + '\n')
+                        logger.info(f'remote server {other_node.hostname}: added {node.hostname} public key successfully')
+                    else:
+                        logger.info(f'remote server {other_node.hostname}: {node.hostname} public key already present')
+                    authorized_keys.close()
+                    sftp.close()
+                    other_ssh.close()
+                except Exception as e:
+                    raise DsmsDeployException(f"remote server {other_node.hostname}: adding {node.hostname} public key failed")
+
+                ssh_client.connect(hostname=node.ip, username=DEPLOY_USERNAME,
+                                   password=node.root_passwd,
+                                   allow_agent=False, look_for_keys=False)
+                # prevent confirmation on first login
+                _, stdout, _ = ssh_client.exec_command(
+                    f'ssh -o StrictHostKeyChecking=no root@{other_node.hostname}')
+        except Exception as e:
+            raise DsmsDeployException(f'Cluster node {node.hostname} SSH trust setup failed: {str(e)}')
+    return True
+
+
+def configure_cluster(nodes):
+    """
+    Configure the server cluster so that the nodes trust each other and each node
+    gets the corresponding /etc/hosts entries
+
+    :param nodes: cluster nodes
+    :return: execution result
+    """
+    logger.info("Config cluster nodes start")
+
+    client = paramiko.SSHClient()
+    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+
+    try:
+        for i, node in enumerate(nodes.values()):
+            ip = node.ip
+            hostname = node.hostname
+
+            client.connect(ip, username=DEPLOY_USERNAME, password=node.root_passwd,
+                           timeout=SSH_TIMEOUT,
+                           allow_agent=False, look_for_keys=False)
+            logger.info(f"set {ip} hostname to {hostname}")
+            command = f"hostnamectl set-hostname {hostname}"
+            client.exec_command(command)
+
+            # Add all hostnames and IPs to /etc/hosts
+            for s in nodes.values():
+                command = f"bash -s {s.ip} {s.hostname}"
+                stdin, stdout, stderr = client.exec_command(command)
+                stdin.write(update_hosts)
+                stdin.flush()
+                # signal EOF so the remote `bash -s` sees the end of the script
+                stdin.channel.shutdown_write()
+            client.close()
+        # set up SSH mutual trust between the nodes
+        logger.info("cluster nodes SSH trust setup start")
+        result = setup_ssh_trust(nodes)
+        logger.info('cluster nodes SSH trust setup end')
+    except Exception as e:
+        raise DsmsDeployException(f'configure_cluster failed: {str(e)}')
+    logger.info("Config cluster nodes end")
+    return result
+
+
+def deploy_node(node):
+    if node.ip:
+        logger.info(f'start deploying server: {node.hostname}')
+        run_ssh_command(node.ip, DEPLOY_USERNAME, node.root_passwd, f'mkdir -p {DEPLOY_PACKAGE}')
+        depend_list = []
+        whl_list = []
+        ceph_rpm_list = []
+        if node.repo_url:
+            logger.info('use custom repo')
+        gpgcheck = 0
+        gpgkey = ''
+        if node.repo_url:
+            if node.repo_key_file:
+                gpgcheck = 1
+                if node.repo_key_file.startswith('http'):
+                    gpgkey = 'gpgkey=' + node.repo_key_file
+                else:
+                    run_ftp_command(
+                        local_file=node.repo_key_file,
+                        server=node.ip,
+                        username=DEPLOY_USERNAME,
+                        password=node.root_passwd,
+                        remote_dir=DEPLOY_PACKAGE,
+                    )
+                    filename = os.path.basename(node.repo_key_file)
+                    run_ssh_command(node.ip, DEPLOY_USERNAME, node.root_passwd, f'sudo rpm --import {DEPLOY_PACKAGE}/{filename}')
+            command = f'echo -e "[dsmsrepo]\nname=dsms-deploy\nbaseurl={node.repo_url}\ngpgcheck={gpgcheck}\nenabled=1\n{gpgkey}" | sudo tee /etc/yum.repos.d/dsms-deploy.repo > /dev/null'
+            run_ssh_command(node.ip, DEPLOY_USERNAME, node.root_passwd, command)
+            run_ssh_command(node.ip, DEPLOY_USERNAME, node.root_passwd, "sudo yum clean all && sudo yum makecache")
+
+        with open(DEPLOY_PACKAGE + "/" + DEPEND_LIST, 'r') as f:
+            for line in f:
+                if line.startswith('depend='):
+                    depend_list.append(line.strip().split('=')[1])
+                elif line.startswith('whl='):
+                    whl_list.append(line.strip().split('=')[1])
+                elif line.startswith('ceph_rpm='):
+                    ceph_rpm_list.append(line.strip().split('=')[1])
+
+        # install all dependencies (package names with the .rpm extension stripped)
+        depend_without_ext_list = []
+        for depend in depend_list:
+            depend_without_ext = depend.rsplit('.', 1)[0]
+            depend_without_ext_list.append(depend_without_ext)
+        for ceph_rpm in ceph_rpm_list:
+            ceph_rpm_without_ext = ceph_rpm.rsplit('.', 1)[0]
+            depend_without_ext_list.append(ceph_rpm_without_ext)
+        depends = ' '.join(str(depend) for depend in depend_without_ext_list)
+        run_ssh_command(node.ip, DEPLOY_USERNAME, node.root_passwd, f"yum install -y {depends}")
+        if node.leader:
+            for whl in whl_list:
+                run_ssh_command(node.ip, DEPLOY_USERNAME, node.root_passwd, f"pip3 install {DEPLOY_PACKAGE}/{whl}")
+
+        run_ssh_command(node.ip, DEPLOY_USERNAME, node.root_passwd, 'ceph -v')
+
+
+def check_time_difference(nodes):
+    logger.info("Checking server times...")
+
+    remote_times = {}
+
+    for server in nodes.values():
+        ssh_client = paramiko.SSHClient()
+        ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+
+        try:
+            ssh_client.connect(hostname=server.ip, username=DEPLOY_USERNAME,
+                               password=server.root_passwd, timeout=SSH_TIMEOUT,
+                               allow_agent=False, look_for_keys=False)
+            stdin, stdout, stderr = ssh_client.exec_command('date +%s')
+            remote_time = int(stdout.read().decode().strip())
+            remote_times[server] = remote_time
+        except Exception as e:
+            raise DsmsDeployException(f"getting {server.hostname} time failed: {str(e)}")
+        finally:
+            ssh_client.close()
+
+    for server1 in remote_times:
+        for server2 in remote_times:
+            if server1 != server2:
+                time_difference = remote_times[server1] - remote_times[server2]
+                if abs(time_difference) > 1:
+                    raise DsmsDeployException(
+                        f"server time check failed. server1: {server1.hostname} server2: {server2.hostname}, time difference is: {time_difference}s")
+    logger.info("Server time check passed")
+
+
+def command_version(args):
+    print(VERSION)
+    return 0
+
+
+def command_deploy(args):
+    logger.info('Starting to deploy the dsms-storage cluster...')
+    nodes = read_cluster_conf(DEPLOY_PACKAGE + '/' + CLUSTER_CONF)
+
+    for node in nodes.values():
+        deploy_node(node)
+
+    logger.info('dsms-storage cluster deployed successfully')
+
+
+def command_init(args):
+    logger.info("Initializing dsms-storage cluster...")
+    nodes = read_cluster_conf(DEPLOY_PACKAGE + '/' + CLUSTER_CONF)
+    check_time_difference(nodes)
+    configure_cluster(nodes)
+    leader = nodes.get(LEADER_NODE)
+    hostname = ' '.join([node.hostname for node in nodes.values()])
+    run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, f'mkdir -p {INIT_DIR}')
+    run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd,
+                    f'cd {INIT_DIR};ceph-deploy new {hostname} --cluster-network={leader.cluster_network} --public-network={leader.public_network}')
+    run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, f'cd {INIT_DIR};ceph-deploy --overwrite-conf config push {hostname}')
+    run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, f'cd {INIT_DIR};ceph-deploy mon create-initial')
+    run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, f'cd {INIT_DIR};ceph-deploy admin {hostname}')
+    run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, f'cd {INIT_DIR};ceph-deploy mgr create {hostname}')
+    run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, f'cd {INIT_DIR};ceph-deploy mds create {hostname}')
+    run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, 'ceph mgr module enable cephadm --force')
+    run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, 'ceph mgr module enable prometheus --force')
+    time.sleep(20)  # give some room to start
+    run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, 'ceph orch set backend cephadm')
+    run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, 'ceph cephadm generate-key')
+    run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, 'ceph cephadm get-pub-key > ~/ceph.pub')
+
+    for node in nodes.values():
+        run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, f'ssh-copy-id -f -i ~/ceph.pub root@{node.hostname}')
+        run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd,
+                        f'scp /home/my-cluster/ceph.bootstrap-osd.keyring root@{node.hostname}:/var/lib/ceph/bootstrap-osd/ceph.keyring')
+        run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, f'ceph orch host add {node.hostname}')
+
+    run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, 'ceph orch apply prometheus')
+    run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, 'ceph orch apply node-exporter')
+    run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, 'ceph restful create-self-signed-cert')
+    run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, 'ceph restful create-key admin')
+    run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, 'ceph config set mon mon_allow_pool_delete true')
+    run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, 'ceph config set global rbd_default_features 1')
+    time.sleep(5)  # give some room to start
+    command_info('')
+    logger.info('dsms-storage cluster initialized successfully')
+
+
+def command_info(args):
+    logger.info("Getting dsms-storage cluster information")
+
+    nodes = read_cluster_conf(DEPLOY_PACKAGE + '/' + CLUSTER_CONF)
+    leader = nodes.get(LEADER_NODE)
+    mgr_services = run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, 'ceph mgr services')
+    auth_key = run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, 'ceph restful list-keys')
+    parsed_data = json.loads(mgr_services)
+    mgr_service = parsed_data["restful"]
+    parsed_data = json.loads(auth_key)
+    admin_value = parsed_data["admin"]
+
+    logger.info(
+        f"Getting dsms-storage cluster information finished \n\tCluster server address: {mgr_service}\n\tCluster server admin key: {admin_value}")
+
+
+def command_add_node(args):
+    logger.info("Adding node to dsms-storage...")
+    cluster_nodes = read_cluster_conf(DEPLOY_PACKAGE + '/' + CLUSTER_CONF)
+    add_nodes = read_add_conf(DEPLOY_PACKAGE + '/' + ADD_NODE_CONF)
+
+    for node in add_nodes.values():
+        deploy_node(node)
+    all_nodes = cluster_nodes.copy()
+    all_nodes.update(add_nodes)
+    check_time_difference(all_nodes)
+    configure_cluster(all_nodes)
+    leader = cluster_nodes.get(LEADER_NODE)
+    for add_node in add_nodes.values():
+        logger.info(f"adding {add_node.hostname} to dsms-storage")
+        run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd,
+                        f'cd {INIT_DIR};ceph-deploy --overwrite-conf config push {add_node.hostname}')
+        run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, f'cd {INIT_DIR};ceph-deploy admin {add_node.hostname}')
+        # the MON value from add-node.conf is the string 'yes'/'no', so compare
+        # explicitly; a bare truthiness check would also treat 'no' as true
+        if add_node.mon == 'yes':
+            run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, f'cd {INIT_DIR};ceph-deploy mon create {add_node.hostname}')
+        run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, f'cd {INIT_DIR};ceph-deploy mgr create {add_node.hostname}')
+        run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, f'cd {INIT_DIR};ceph-deploy mds create {add_node.hostname}')
+        run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, f'ssh-copy-id -f -i ~/ceph.pub root@{add_node.hostname}')
+
+        run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd,
+                        f'scp /home/my-cluster/ceph.bootstrap-osd.keyring root@{add_node.hostname}:/var/lib/ceph/bootstrap-osd/ceph.keyring')
+        run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, f'ceph orch host add {add_node.hostname}')
+        logger.info(f"add {add_node.hostname} to dsms-storage complete")
+    logger.info("Added node(s) to dsms-storage successfully")
+
+
+def command_remove_node(args):
+    logger.info("Removing node from dsms-storage...")
+    cluster_nodes = read_cluster_conf(DEPLOY_PACKAGE + '/' + CLUSTER_CONF)
+    leader = cluster_nodes.get(LEADER_NODE)
+    for node in args.host:
+        # run from INIT_DIR so ceph-deploy finds the cluster config, matching
+        # every other ceph-deploy invocation in this script
+        run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, f'cd {INIT_DIR};ceph-deploy mon destroy {node}')
+        run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, f'cd {INIT_DIR};ceph-deploy purge {node}')
+        run_ssh_command(leader.ip, DEPLOY_USERNAME, leader.root_passwd, f'cd {INIT_DIR};ceph-deploy purgedata {node}')
+    logger.info("Removed node from dsms-storage successfully")
+
+
+def command_rm_cluster(args):
+    # Remove the dsms-storage cluster and uninstall the package
+    logger.info('Removing the dsms-storage cluster...')
+    nodes = read_cluster_conf(DEPLOY_PACKAGE + '/' + CLUSTER_CONF)
+
+    for node in nodes.values():
+        logger.info(f'Starting remove node: {node.hostname}')
+        run_ssh_command(node.ip, DEPLOY_USERNAME, node.root_passwd, f'ceph-deploy purge {node.hostname}')
+        run_ssh_command(node.ip, DEPLOY_USERNAME, node.root_passwd, f'ceph-deploy purgedata {node.hostname}')
+        run_ssh_command(node.ip, DEPLOY_USERNAME, node.root_passwd, f'rm -rf {INIT_DIR}/*')
+        run_ssh_command(node.ip, DEPLOY_USERNAME, node.root_passwd, f'rm -rf {DEPLOY_PACKAGE}/*')
+    logger.info('Removed the dsms-storage cluster successfully')
+
+
+def _get_parser():
+    # type: () -> argparse.ArgumentParser
+    parser = argparse.ArgumentParser(
+        description='Bootstrap dsms storage.',
+        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+
+    subparsers = parser.add_subparsers(help='sub-command')
+
+    parser_version = subparsers.add_parser('version', help='Show dsms-deploy version')
+    parser_version.set_defaults(func=command_version)
+
+    parser_deploy = subparsers.add_parser('deploy', help='Deploy dsms-storage package')
+    parser_deploy.set_defaults(func=command_deploy)
+
+    parser_init = subparsers.add_parser('init', help='Init dsms-storage cluster')
+    parser_init.set_defaults(func=command_init)
+
+    parser_add_node = subparsers.add_parser('addNode', help='Add a node to an existing cluster')
+    parser_add_node.set_defaults(func=command_add_node)
+
+    parser_remove_node = subparsers.add_parser('removeNode', help='Remove a node from an existing cluster')
+    parser_remove_node.set_defaults(func=command_remove_node)
+    parser_remove_node.add_argument(
+        'host',
+        metavar='HOST',
+        nargs='+',
+        help='hosts to purge Ceph data from',
+    )
+
+    parser_rm_cluster = subparsers.add_parser('rmCluster', help='Remove all daemons for this cluster')
+    parser_rm_cluster.set_defaults(func=command_rm_cluster)
+
+    parser_info = subparsers.add_parser('info', help='Get cluster info')
+    parser_info.set_defaults(func=command_info)
+
+    return parser
+
+
+def _parse_args(av):
+    parser = _get_parser()
+    args = parser.parse_args(av)
+
+    return args
+
+
+if __name__ == '__main__':
+
+    # get args
+    try:
+        av = injected_argv  # type: ignore
+    except NameError:
+        av = sys.argv[1:]
+
+    if not av:
+        sys.stderr.write('No command specified; pass -h or --help for usage\n')
+        sys.exit(1)
+    # dsms_require_root()
+
+    # init log
+    if not os.path.exists(LOG_DIR):
+        os.makedirs(LOG_DIR)
+    dictConfig(logging_config)
+
+    logger = logging.getLogger()
+
+    logger.info("dsms-deploy params %s" % av)
+    # parse args
+    args = _parse_args(av)
+
+    try:
+        r = args.func(args)
+    except DsmsDeployException as e:
+        sys.stderr.write('ERROR: %s\n' % e)
+        sys.exit(1)
+    if not r:
+        r = 0
+    sys.exit(r)
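A note on the depend_list format consumed by deploy_node above: each line is `<kind>=<file-name>`, where `depend=` and `ceph_rpm=` entries have their `.rpm` suffix stripped and are handed to a single `yum install`, while `whl=` archives are pip-installed on the leader node only. A minimal standalone sketch of that parsing (the function name `parse_depend_list` is illustrative, not part of the shipped script):

```python
from collections import defaultdict

def parse_depend_list(path):
    """Group depend_list entries by their kind prefix."""
    packages = defaultdict(list)
    with open(path) as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            kind, _, name = line.partition('=')
            if kind in ('depend', 'whl', 'ceph_rpm') and name:
                packages[kind].append(name)
    return packages

# Example: names passed to `yum install`, with the .rpm suffix stripped
# pkgs = parse_depend_list('/etc/dsms/dsms-deploy/depend_list')
# rpms = [n.rsplit('.', 1)[0] for n in pkgs['depend'] + pkgs['ceph_rpm']]
```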
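cluster.conf keys follow a `NODE<n>_<ATTR>=` convention: read_cluster_conf splits each key on the first underscore and sets the lower-cased remainder as an attribute on that node's Node object, while cluster-wide keys such as PUBLIC_NETWORK fan out to every node. A self-contained sketch of that mapping, using a trimmed stand-in for the Node class:

```python
class NodeStub:
    """Stand-in for Node with only the fields this sketch touches."""
    ip = ''
    ssh_port = ''
    public_network = ''

conf_lines = ['NODE1_IP=10.0.0.1', 'NODE1_SSH_PORT=22', 'PUBLIC_NETWORK=10.0.0.0/24']
nodes = {}
for line in conf_lines:
    key, _, value = line.partition('=')
    if key.startswith('NODE'):
        node_name, attr_name = key.split('_', 1)
        node = nodes.setdefault(node_name, NodeStub())
        setattr(node, attr_name.lower(), value)
    else:  # cluster-wide keys apply to every node
        for node in nodes.values():
            setattr(node, key.lower(), value)

assert nodes['NODE1'].ssh_port == '22'
assert nodes['NODE1'].public_network == '10.0.0.0/24'
```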
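The embedded update_hosts shell script keeps /etc/hosts idempotent: it appends `IP HOSTNAME` only when that exact pair is absent, and first deletes any entry that reuses the hostname with a different IP. The same idea sketched in Python, with the path parameterized so it can be tried against a scratch file rather than the real /etc/hosts:

```python
def update_hosts_file(ip, hostname, hosts_path='/etc/hosts'):
    """Idempotently map hostname -> ip in an /etc/hosts-style file."""
    entry = f'{ip} {hostname}'
    with open(hosts_path) as f:
        lines = f.read().splitlines()
    if entry in lines:
        return  # exact pair already present, nothing to do
    # drop stale entries: same hostname (second field), different IP
    kept = [line for line in lines
            if len(line.split()) < 2 or line.split()[1] != hostname]
    kept.append(entry)
    with open(hosts_path, 'w') as f:
        f.write('\n'.join(kept) + '\n')
```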
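run_ssh_command's core pattern, stripped to its essentials: open a paramiko client, run the command, drain stdout and stderr into one buffer, then gate success on the channel exit status. A reduced sketch (host and credentials are placeholders):

```python
import io
import paramiko

def ssh_run(host, password, command, username='root', timeout=10):
    with paramiko.SSHClient() as client:
        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        client.connect(host, username=username, password=password,
                       timeout=timeout, allow_agent=False, look_for_keys=False)
        _, stdout, stderr = client.exec_command(command)
        buf = io.StringIO()
        for line in stdout:  # drain stdout first, then stderr
            buf.write(line)
        for line in stderr:
            buf.write(line)
        if stdout.channel.recv_exit_status() != 0:
            raise RuntimeError(buf.getvalue())  # non-zero exit means failure
        return buf.getvalue()

# usage (placeholder values): ssh_run('10.0.0.1', 'root-password', 'ceph -v')
```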
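Finally, check_time_difference compares every ordered pair of nodes, which is equivalent to requiring that the spread between the fastest and slowest clock stays within the one-second threshold. A small sketch of that reduction:

```python
def clocks_in_sync(remote_times, max_skew=1):
    """remote_times: mapping of hostname -> epoch seconds, as read via `date +%s`."""
    times = list(remote_times.values())
    return max(times) - min(times) <= max_skew

assert clocks_in_sync({'node1': 1700000000, 'node2': 1700000001})
assert not clocks_in_sync({'node1': 1700000000, 'node2': 1700000002})
```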