diff --git a/.cspell.json b/.cspell.json new file mode 100644 index 0000000..83f2ef7 --- /dev/null +++ b/.cspell.json @@ -0,0 +1,114 @@ +{ + "version": "0.2", + "language": "en", + "words": [ + "apache", + "apt", + "awk", + "aws", + "bashrc", + "cfg", + "cloudy", + "conf", + "config", + "DBSERVER", + "debian", + "django", + "docker", + "ec2", + "env", + "fabfile", + "firewall", + "geoip", + "git", + "github", + "grep", + "gunicorn", + "hostname", + "htop", + "http", + "https", + "ini", + "iostat", + "ip", + "isort", + "json", + "keypair", + "keypairs", + "localhost", + "maxmind", + "memcached", + "myapp", + "mypy", + "mysql", + "myuser", + "nginx", + "openvpn", + "passwordless", + "pgbouncer", + "pgis", + "pgpool", + "pip", + "postgis", + "postgresql", + "privs", + "psql", + "pyenv", + "pyproject", + "redis", + "sed", + "setuptools", + "ssh", + "sshfs", + "ssl", + "subcollection", + "subcollections", + "sudo", + "sudoer", + "sudoers", + "supervisor", + "systemctl", + "systemd", + "tcp", + "toml", + "ubuntu", + "udp", + "ufw", + "venv", + "vim", + "virtualenv", + "webdirs", + "wpuser", + "wsgi", + "xml", + "yaml", + "yml" + ], + "flagWords": [], + "ignorePaths": [ + ".venv/**", + "node_modules/**", + "dist/**", + "build/**", + "*.egg-info/**", + "__pycache__/**", + "*.pyc", + "*.pyo", + "*.log", + ".git/**" + ], + "overrides": [ + { + "filename": "**/*.py", + "languageId": "python" + }, + { + "filename": "**/*.sh", + "languageId": "shellscript" + }, + { + "filename": "**/*.md", + "languageId": "markdown" + } + ] +} diff --git a/.flake8 b/.flake8 new file mode 100644 index 0000000..de428c3 --- /dev/null +++ b/.flake8 @@ -0,0 +1,15 @@ +[flake8] +max-line-length = 100 +extend-ignore = E203, W503, E501 +exclude = + .git, + __pycache__, + build, + dist, + .venv, + .eggs, + *.egg-info, + .pytest_cache, + .mypy_cache +per-file-ignores = + __init__.py:F401 \ No newline at end of file diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml new file mode 100644 index 0000000..3918eee --- /dev/null +++ b/.github/workflows/test.yml @@ -0,0 +1,35 @@ +name: Test + +on: + push: + branches: [ main, dev, "feat/*" ] + pull_request: + branches: [ main, dev ] + +jobs: + test: + runs-on: ubuntu-latest + strategy: + matrix: + python-version: ["3.10", "3.11"] + + steps: + - uses: actions/checkout@v4 + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + + - name: Create virtual environment + run: | + python -m venv .venv + source .venv/bin/activate + pip install --upgrade pip + pip install -e . + + - name: Run tests + run: ./test.sh + + - name: Run linting + run: ./lint.sh \ No newline at end of file diff --git a/.vscode/settings.json b/.vscode/settings.json index 4469dd3..b3e80fe 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -1,4 +1,372 @@ { "python.defaultInterpreterPath": "${workspaceFolder}/.venv/bin/python", - "python.analysis.typeCheckingMode": "basic" + "python.analysis.typeCheckingMode": "basic", + "cSpell.words": [ + "pgbouncer", + "pgpool", + "pgis", + "fabfile", + "memcached", + "redis", + "nginx", + "apache", + "psql", + "mysql", + "postgresql", + "postgis", + "maxmind", + "geoip", + "sudo", + "sudoer", + "sudoers", + "keypair", + "keypairs", + "sshfs", + "privs", + "webdirs", + "subcollection", + "subcollections", + "venv", + "pyenv", + "pyproject", + "toml", + "openvpn", + "ufw", + "iptables", + "systemd", + "systemctl", + "ubuntu", + "debian", + "centos", + "rhel", + "awscli", + "boto", + "ec2", + "aws", + "cloudwatch", + "iam", + "vpc", + "s3", + "rds", + "elb", + "autoscaling", + "cloudformation", + "terraform", + "ansible", + "puppet", + "chef", + "saltstack", + "kubernetes", + "docker", + "containerd", + "dockerd", + "dockerfile", + "dockerhub", + "supervisor", + "supervisord", + "gunicorn", + "uwsgi", + "wsgi", + "asgi", + "django", + "flask", + "fastapi", + "celery", + "rabbitmq", + "elasticsearch", + "kibana", + "logstash", + "grafana", + "prometheus", + "influxdb", + "telegraf", + "nagios", + "zabbix", + "datadog", + "newrelic", + "rollbar", + "sentry", + "bugsnag", + "cloudflare", + "ssl", + "tls", + "https", + "http", + "tcp", + "udp", + "icmp", + "dns", + "dhcp", + "ntp", + "smtp", + "imap", + "pop3", + "ftp", + "sftp", + "ssh", + "scp", + "rsync", + "wget", + "curl", + "grep", + "sed", + "awk", + "vim", + "nano", + "emacs", + "tmux", + "screen", + "htop", + "iostat", + "netstat", + "ss", + "lsof", + "strace", + "tcpdump", + "wireshark", + "nmap", + "ngrep", + "iftop", + "iotop", + "dmesg", + "journalctl", + "logrotate", + "cron", + "crontab", + "systemctl", + "systemd", + "init", + "upstart", + "sysvinit", + "chkconfig", + "update", + "rc", + "apt", + "yum", + "dnf", + "zypper", + "pacman", + "homebrew", + "pip", + "conda", + "virtualenv", + "pipenv", + "poetry", + "setuptools", + "distutils", + "wheel", + "twine", + "pypi", + "github", + "gitlab", + "bitbucket", + "git", + "svn", + "hg", + "mercurial", + "bzr", + "cvs", + "repo", + "repos", + "config", + "configs", + "cfg", + "conf", + "json", + "yaml", + "yml", + "xml", + "ini", + "env", + "dotenv", + "bashrc", + "zshrc", + "profile", + "aliases", + "exports", + "functions", + "completions", + "hostname", + "localhost", + "fqdn", + "ip", + "ipv4", + "ipv6", + "cidr", + "netmask", + "gateway", + "router", + "switch", + "firewall", + "iptables", + "ufw", + "fail2ban", + "selinux", + "apparmor", + "grsecurity", + "pax", + "aslr", + "nx", + "dep", + "canary", + "stack", + "heap", + "buffer", + "overflow", + "underflow", + "segfault", + "coredump", + "backtrace", + "debugger", + "gdb", + "lldb", + "valgrind", + "sanitizer", + "asan", + "msan", + "tsan", + "ubsan", + "fuzzer", + "afl", + "libfuzzer", + "honggfuzz", + "perf", + "ftrace", + "dtrace", + "bpf", + "ebpf", + "kprobe", + "uprobe", + "tracepoint", + "profile", + "profiler", + "benchmark", + "microbenchmark", + "macrobenchmark", + "loadtest", + "stresstest", + "unittest", + "pytest", + "nose", + "tox", + "coverage", + "codecov", + "coveralls", + "sonarqube", + "sonarcloud", + "codeclimate", + "codefactor", + "codacy", + "deepsource", + "lgtm", + "snyk", + "whitesource", + "blackduck", + "veracode", + "checkmarx", + "fortify", + "bandit", + "safety", + "piprot", + "outdated", + "vulnerabilities", + "cve", + "nvd", + "mitre", + "owasp", + "sans", + "nist", + "iso", + "pci", + "dss", + "gdpr", + "hipaa", + "sox", + "compliance", + "audit", + "pentest", + "redteam", + "blueteam", + "purpleteam", + "threatmodel", + "riskassessment", + "incidentresponse", + "forensics", + "malware", + "antivirus", + "edr", + "siem", + "soar", + "iam", + "rbac", + "abac", + "saml", + "oauth", + "oidc", + "jwt", + "ldap", + "ad", + "kerberos", + "ntlm", + "radius", + "tacacs", + "mfa", + "totp", + "hotp", + "yubikey", + "fido", + "webauthn", + "passkey", + "biometric", + "fingerprint", + "faceauth", + "voiceauth", + "retina", + "iris", + "palm", + "vein", + "smartcard", + "pkcs", + "x509", + "pki", + "ca", + "crl", + "ocsp", + "csr", + "cer", + "crt", + "pem", + "der", + "p12", + "pfx", + "jks", + "keystore", + "truststore", + "cloudy", + "myuser", + "myapp", + "wpuser", + "mysite", + "neekware" + ], + "cSpell.enableFiletypes": [ + "python", + "bash", + "shellscript", + "markdown", + "yaml", + "json", + "dockerfile" + ], + "cSpell.ignorePaths": [ + ".venv/**", + "node_modules/**", + "dist/**", + "build/**", + "*.egg-info/**", + "__pycache__/**", + "*.pyc", + "*.pyo", + "*.log" + ] } diff --git a/CHANGELOG.md b/CHANGELOG.md index e7e41ef..40afce1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,24 +1,107 @@ -## 0.0.4 +# Changelog -Maintenance: +All notable changes to this project will be documented in this file. - - Upgrade to Ubuntu 18.04 STL - - Remove deprecated packages +## [Unreleased] -## 0.0.3 +### Major Changes - Command Structure Modernization -Enhancement: +**Breaking Changes:** +- ⚠️ **Complete command structure overhaul** - All command names simplified and reorganized +- ⚠️ **Recipe commands renamed**: `setup.*` → `recipe.*` with shorter names + - `setup.server` → `recipe.gen-install` + - `setup.cache` → `recipe.redis-install` + - `setup.database` → `recipe.psql-install` + - `setup.web` → `recipe.web-install` + - `setup.load-balancer` → `recipe.lb-install` + - `setup.vpn` → `recipe.vpn-install` + - `setup.standalone` → `recipe.sta-install` + +### Added + +**Infrastructure:** +- ✅ **Modern Python packaging** - Migrated from `setup.py` to `pyproject.toml` +- ✅ **Automated environment setup** - `./bootstrap.sh` script for quick setup +- ✅ **Comprehensive testing** - Minimal test suite in `tests/` directory +- ✅ **Code quality tools** - Black, isort, flake8, mypy integration via `./lint.sh` +- ✅ **Spell checking** - Comprehensive technical dictionary in `.cspell.json` +- ✅ **Global exception handling** - SSH authentication failure guidance + +**Command Organization:** +- ✅ **Hierarchical namespaces** - Clear command structure with intuitive grouping +- ✅ **127+ organized commands** - All functionality restored with simplified names +- ✅ **Enhanced help system** - Better command documentation and examples + +**Development Experience:** +- ✅ **Standardized virtual environment** - Using `.venv` consistently +- ✅ **Environment validation** - Scripts check for proper setup +- ✅ **Executable scripts** - All scripts use `#!/usr/bin/env` for portability +- ✅ **Test automation** - `./test.sh` for easy development testing + +### Changed + +**Command Structure:** +- 🔄 **Database commands** simplified: `db.pg.*`, `db.my.*`, `db.pgb.*`, etc. +- 🔄 **System commands** streamlined: `sys.*` with clear action names +- 🔄 **Web server commands** organized: `web.apache.*`, `web.nginx.*`, etc. +- 🔄 **Firewall commands** simplified: `fw.*` with intuitive names +- 🔄 **Service commands** grouped: `services.docker.*`, `services.cache.*`, etc. + +**Development Workflow:** +- 🔄 **Test runner** moved to `tests/test_runner.py` +- 🔄 **Linting modernized** - Black with 100-character line length +- 🔄 **Configuration updated** - Modern Python 3.11+ support + +### Technical Improvements - - Incremental update +**Code Quality:** +- ✅ **100% import coverage** - All modules properly importable +- ✅ **Function verification** - All Fabric tasks verified and working +- ✅ **Type checking** - Basic mypy configuration +- ✅ **Consistent formatting** - Black and isort integration +**Documentation:** +- ✅ **Complete README rewrite** - Modern examples and comprehensive usage +- ✅ **Updated CLAUDE.md** - Development workflow documentation +- ✅ **Example updates** - All examples use new command structure -## 0.0.2 +### Development Notes + +**Testing:** +```bash +./test.sh # Run full test suite +python tests/test_runner.py # Run tests directly +``` + +**Code Quality:** +```bash +./lint.sh # Run all linting tools +``` + +**Environment:** +```bash +./bootstrap.sh # Automated setup +source .venv/bin/activate # Manual activation +``` + +--- + +## [0.0.4] - Legacy + +Maintenance: +- Upgrade to Ubuntu 18.04 LTS +- Remove deprecated packages + +## [0.0.3] - Legacy Enhancement: +- Incremental update - - Incremental update +## [0.0.2] - Legacy +Enhancement: +- Incremental update -## 0.0.1 +## [0.0.1] - Legacy - - Initial version +- Initial version diff --git a/CLAUDE.md b/CLAUDE.md index afae6b3..d768587 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -5,31 +5,163 @@ This file provides guidance to Claude Code (claude.ai/code) when working with co ## Development Commands ### Environment Setup + +**⚠️ CRITICAL**: Always use `.venv` (not `venv`) for the virtual environment! + ```bash -python3 -m venv venv -source venv/bin/activate +# Automated setup (recommended) +./bootstrap.sh + +# OR manual setup +python3 -m venv .venv +source .venv/bin/activate pip install -e . -cd cloudy +``` + +**Before any Python/Fabric commands, ALWAYS activate:** +```bash +source .venv/bin/activate ``` ### Core Development Commands - **List all Fabric tasks**: `fab -l` -- **Run tests**: `python test.py` -- **Run linting**: `./pep8.sh` (PEP8 compliance checking) +- **Run tests**: `./test.sh` (minimal test suite from `tests/` directory) +- **Run linting**: `./lint.sh` (Black, isort, flake8, mypy) +- **Verbose output**: `CLOUDY_VERBOSE=1 fab [command]` (shows all command output) +- **Debug mode**: `fab --debug [command]` (Fabric debug + all output) +- **Spell checking**: Configured via `.cspell.json` and `.vscode/settings.json` - **Publish package**: `python setup.py publish` +### Secure Server Management + +**⚠️ IMPORTANT**: After running `recipe.gen-install`, root login is disabled for security. + +**⚠️ CRITICAL - Sudo Password Requirements**: + +Due to underlying issues with Fabric, Python Cloudy does NOT support interactive password prompts. For any sudo operations, you MUST export the password as an environment variable: + +```bash +# REQUIRED: Set sudo password via environment variable +export INVOKE_SUDO_PASSWORD=admin_user_password + +# Then run commands normally +fab -H admin@server:port command +``` + +This applies to ALL non-root operations including: +- System administration (`sys.*`) +- Database management (`db.*`) +- Web server operations (`web.*`) +- Service management (`services.*`) +- Firewall configuration (`fw.*`) + +**Alternative: Fabric Built-in Password Prompts**: +Fabric provides command-line options for password prompting, though these may not work reliably with Python Cloudy: + +```bash +# SSH authentication password prompt +fab --prompt-for-login-password -H user@server command + +# Sudo password prompt +fab --prompt-for-sudo-password -H user@server command +``` + +**⚠️ WARNING**: These Fabric options may not work consistently due to underlying Fabric issues. The environment variable approach (`INVOKE_SUDO_PASSWORD`) is the recommended and reliable method. + +**Complete Secure Workflow Example**: +```bash +# 1. Setup secure server (disables root, creates admin user with SSH keys) +source .venv/bin/activate +fab -H root@10.10.10.198 recipe.gen-install --cfg-file=./.cloudy.generic + +# 2. After setup, connect as admin user with sudo access +export INVOKE_SUDO_PASSWORD=pass4admin +fab -H admin@10.10.10.198:22022 web.nginx.install +fab -H admin@10.10.10.198:22022 db.pg.install +fab -H admin@10.10.10.198:22022 fw.allow-http + +# 3. Use verbose/debug flags to control output +CLOUDY_VERBOSE=1 fab -H admin@10.10.10.198:22022 db.pg.status # Show all output +fab -H admin@10.10.10.198:22022 --debug fw.status # Show debug info + all output +fab -H admin@10.10.10.198:22022 --echo sys.services # Echo commands + smart output +fab -H admin@10.10.10.198:22022 sys.services # Smart output (hides install noise) +``` + +**Security Features**: +- ✅ Root login disabled (`PermitRootLogin no`) +- ✅ Admin user with SSH key authentication +- ✅ Custom SSH port (default: 22022) +- ✅ UFW firewall configured +- ✅ Password + sudo access for privileged operations + +**Smart Output System**: +- ✅ **Default**: Hides noisy installation commands, shows status/informational commands +- ✅ **CLOUDY_VERBOSE=1**: Shows all command output (environment variable) +- ✅ **--debug/-d**: Shows debug information and all output (Fabric built-in) +- ✅ **--echo/-e**: Echo commands before running (Fabric built-in) +- ✅ **Always Shown**: `ufw status`, `df`, `ps`, `systemctl status`, `pg_lsclusters`, etc. +- ✅ **Hidden by Default**: `apt install`, `wget`, `make`, `pip install`, `pg_createcluster`, `createdb`, etc. + +**Recipe Success Messages**: +- ✅ **Comprehensive Summaries**: All recipes show detailed configuration summaries upon completion +- ✅ **Visual Indicators**: 🎉 ✅ success icons and 🚀 ready-to-use messages +- ✅ **Configuration Details**: Ports, addresses, users, versions, firewall rules +- ✅ **Next Steps**: Connection information and usage guidance +- ✅ **Consistent Format**: Standardized success output across all recipe types + ### Fabric Command Patterns + ```bash -# Execute system commands -fab -H user@host:port core.sys-uname +# High-level server deployment (one command setups) +fab recipe.gen-install --cfg-file=./.cloudy.production +fab recipe.psql-install --cfg-file=./.cloudy.production +fab recipe.web-install --cfg-file=./.cloudy.production +fab recipe.redis-install --cfg-file=./.cloudy.production +fab recipe.lb-install --cfg-file=./.cloudy.production -# Run recipes with single config -fab recipe-generic-server.setup-server --cfg-file=./.cloudy.generic +# Database operations +fab db.pg.create-user --username=webapp --password=secure123 +fab db.pg.create-db --database=myapp --owner=webapp +fab db.pg.dump --database=myapp +fab db.my.create-user --username=webapp --password=secure123 -# Run recipes with multiple configs -fab -H root@host recipe-webserver-django.setup-web --cfg-file=./base.cfg,./web.cfg +# System administration +fab sys.hostname --hostname=myserver.com +fab sys.add-user --username=admin +fab sys.ssh-port --port=2222 +fab sys.timezone --timezone=America/New_York + +# Security & Firewall +fab fw.install +fab fw.secure-server --ssh-port=2222 +fab fw.allow-http +fab fw.allow-https +fab security.install-common + +# Services +fab services.cache.install +fab services.cache.configure +fab services.docker.install +fab services.docker.add-user --username=myuser + +# Get help +fab help # Show all command categories with examples ``` +#### Command Categories +- **recipe.***: One-command server deployment recipes (7 commands) +- **sys.***: System configuration (hostname, users, SSH, timezone) (31 commands) +- **db.pg.***: PostgreSQL operations (create, backup, users) (17 commands) +- **db.my.***: MySQL operations (create, backup, users) (7 commands) +- **db.pgb.***: PgBouncer connection pooling (3 commands) +- **db.pgp.***: PgPool load balancing (2 commands) +- **db.gis.***: PostGIS spatial database extensions (4 commands) +- **fw.***: Firewall configuration (9 commands) +- **security.***: Security hardening (1 command) +- **services.***: Service management (Docker, Redis, Memcached, VPN) (17 commands) +- **web.***: Web server management (Apache, Nginx, Supervisor) (13 commands) +- **aws.***: Cloud management (EC2) (16 commands) + ## Architecture Overview ### Module Structure @@ -55,7 +187,7 @@ git-user-email = john@example.com timezone = America/New_York admin-user = admin hostname = my-server -python-version = 3.8 +python-version = 3.11 [WEBSERVER] webserver = gunicorn @@ -88,11 +220,12 @@ def setup_server(c: Context, cfg_file=None): ## Development Requirements - **Python**: ≥3.8 -- **Key Dependencies**: +- **Key Dependencies** (defined in `pyproject.toml` and `requirements.txt`): - Fabric ≥3.2.2 (SSH automation) - apache-libcloud ≥3.8.0 (cloud provider abstraction) - colorama ≥0.4.6 (colored terminal output) - s3cmd ≥2.4.0 (S3 management) + - Development tools: Black, isort, flake8, mypy ## Working with Configurations diff --git a/LICENSE b/LICENSE index 82af695..1288fd7 100644 --- a/LICENSE +++ b/LICENSE @@ -19,3 +19,4 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + diff --git a/README.md b/README.md index 38eb900..1e4f930 100644 --- a/README.md +++ b/README.md @@ -1,53 +1,427 @@ # Python Cloudy -**A Python utility that simplifies cloud configuration** +**Infrastructure automation toolkit for cloud and server management** --- ## Overview -Python Cloudy is a utility to simplify server cloud configuration and automation. +Python Cloudy is a comprehensive infrastructure automation toolkit that simplifies server configuration, database management, web server setup, and cloud deployments. Built on top of Fabric, it provides over 120 organized commands for system administration and DevOps workflows. + +### Key Features + +- 🚀 **High-level deployment recipes** for complete server setups +- 🗄️ **Database automation** (PostgreSQL, MySQL, Redis, Memcached) +- 🌐 **Web server management** (Apache, Nginx, Supervisor) +- ☁️ **Cloud integration** (AWS EC2) +- 🔒 **Security & firewall** configuration +- 🔧 **System administration** utilities --- -## Installation +## Quick Start + +### Automated Setup (Recommended) +```bash +git clone https://github.com/un33k/python-cloudy +cd python-cloudy +./bootstrap.sh +``` -1. `python3 -m venv venv` -2. `source venv/bin/activate` -3. `git clone https://github.com/un33k/python-cloudy` -4. `pip install -e python-cloudy` -5. `cd python-cloudy/cloudy` -6. `fab -l` -7. *(Optional)* Create a `~/.cloudy` file based on the example in the `cfg` directory. +### Manual Setup +```bash +python3 -m venv .venv +source .venv/bin/activate +pip install -e . +``` + +### Verify Installation +```bash +source .venv/bin/activate +fab -l +``` --- -## Usage - -- List all commands: - ``` - fab -l - ``` -- Run a command: - ``` - fab -H auto@10.10.10.198:22022 -i ~/.ssh/id_rsa core.sys-uname - ``` - ``` - fab recipe-generic-server.setup-server --cfg-file=./.cloudy.generic - ``` - ``` - fab -H root@10.10.10.198 recipe-generic-server.setup-server --cfg-file=./.cloudy.generic,./.cloudy.admin - ``` -- *(...etc.)* +## Important: Sudo Password Configuration + +**⚠️ CRITICAL**: Due to underlying issues with Fabric, Python Cloudy requires explicit sudo password configuration since interactive password prompts are not supported in automated deployments. + +For any operations requiring sudo privileges (when not running as root), you must export the sudo password as an environment variable: + +```bash +export INVOKE_SUDO_PASSWORD=your_sudo_password +``` + +This applies to all non-root operations including: +- System administration tasks +- Package installations +- Service management +- Configuration file updates --- -## Running the Tests +## Output Control & Debugging + +Python Cloudy features a smart output system that provides clean, professional command execution while maintaining full debugging capabilities when needed. + +**⚡ Quick Start**: Use `CLOUDY_VERBOSE=1` before any command to see full output: +```bash +# Example: PostgreSQL installation with verbose output +CLOUDY_VERBOSE=1 fab -H root@10.10.10.198 recipe.psql-install --cfg-paths="./.cloudy.psql" + +# Example: System update with verbose output +CLOUDY_VERBOSE=1 fab -H admin@server:22022 sys.update +``` + +### Output Control Modes + +#### Default Mode (Smart Output) +By default, Python Cloudy intelligently categorizes commands: +- **Shows**: Status commands (`ufw status`, `systemctl status`, `df`, `ps`, etc.) +- **Hides**: Noisy installation commands (`apt install`, `wget`, `make`, `pip install`, `pg_createcluster`, etc.) +- **Indicators**: Success (✅) or failure (❌) messages for hidden commands + +```bash +# Clean output - hides installation noise, shows status information +fab -H admin@server:22022 db.pg.install +fab -H admin@server:22022 fw.status # Always shows output +``` + +#### Verbose Mode +Shows all command output for debugging and troubleshooting. **Note: Fabric does not have a `--verbose` flag**, so use the environment variable: + +```bash +# Show all command output using environment variable +export CLOUDY_VERBOSE=1 +fab -H admin@server:22022 db.pg.install +fab -H admin@server:22022 sys.update + +# Or inline for single command +CLOUDY_VERBOSE=1 fab -H admin@server:22022 db.pg.install + +# Clear verbose mode +unset CLOUDY_VERBOSE +``` + +#### Debug Mode +Enables Fabric's built-in debug mode plus all command output: + +```bash +# Enable debug mode with full output +fab -H admin@server:22022 --debug db.pg.install +fab -H admin@server:22022 -d fw.secure-server +``` -To run the tests against the current environment: +#### Echo Mode +Echo commands before execution (Fabric built-in): +```bash +# Echo commands before running +fab -H admin@server:22022 --echo sys.hostname --hostname=myserver +fab -H admin@server:22022 -e web.nginx.install ``` -python test.py + +### Recipe Success Messages + +All deployment recipes provide comprehensive success summaries: + +```bash +fab -H root@server recipe.gen-install --cfg-file="./.cloudy.generic" +``` + +**Example Output:** +``` +🎉 ✅ GENERIC SERVER SETUP COMPLETED SUCCESSFULLY! +📋 Configuration Summary: + ├── Hostname: myserver.example.com + ├── Timezone: America/New_York + ├── Admin User: admin (groups: admin,www-data) + ├── SSH Port: 22022 + ├── Root Login: Disabled + ├── SSH Keys: Configured + └── Firewall: UFW enabled and configured + +🚀 Generic server foundation is ready for specialized deployments! + └── SSH Access: admin@server:22022 (key-based authentication) +``` + +### Environment Variables + +For programmatic control, you can use environment variables: + +```bash +# Enable verbose output via environment variable (RECOMMENDED) +export CLOUDY_VERBOSE=1 +fab -H server sys.update + +# Or use inline for single commands +CLOUDY_VERBOSE=1 fab -H server sys.update + +# Clear verbose mode +unset CLOUDY_VERBOSE +``` + +**Note**: `CLOUDY_VERBOSE=1` is the recommended way to enable verbose output since Fabric does not have a built-in `--verbose` flag. + +### Best Practices + +- **Development**: Use `CLOUDY_VERBOSE=1` or `--debug` when troubleshooting issues +- **Production**: Use default mode for clean output and success confirmations +- **Automation**: Set `CLOUDY_VERBOSE=1` in CI/CD environments for full logs +- **Learning**: Use `--echo` to see exact commands being executed + +### Quick Reference + +```bash +# Default mode (clean output) +fab -H server sys.update + +# Verbose mode (show all output) +CLOUDY_VERBOSE=1 fab -H server sys.update + +# Debug mode (Fabric debug + all output) +fab -H server --debug sys.update + +# Echo mode (show commands before running) +fab -H server --echo sys.update +``` + +--- + +## Usage Examples + +### Secure Server Deployment Workflow + +**⚠️ IMPORTANT**: Python Cloudy implements a secure two-phase deployment: + +#### Phase 1: Initial Setup (as root) +```bash +# Setup secure server - creates admin user, disables root login, configures firewall +source .venv/bin/activate +fab -H root@10.10.10.198 recipe.gen-install --cfg-file="./.cloudy.generic" +``` + +**After this step:** +- ✅ Root login is disabled for security +- ✅ Admin user created with SSH key authentication +- ✅ SSH port changed (default: 22022) +- ✅ UFW firewall configured + +#### Phase 2: Ongoing Management (as admin user) + +**⚠️ CRITICAL**: Due to underlying Fabric issues, for any sudo operations, you must export the password as an environment variable since interactive password prompts are not supported: + +```bash +# Set sudo password for automation (REQUIRED for sudo operations) +export INVOKE_SUDO_PASSWORD=your_admin_password + +# Install additional services (Nginx, PostgreSQL, etc.) +fab -H admin@10.10.10.198:22022 web.nginx.install +fab -H admin@10.10.10.198:22022 db.pg.install +fab -H admin@10.10.10.198:22022 fw.allow-http +fab -H admin@10.10.10.198:22022 fw.allow-https + +# Use environment variable for verbose output or --debug flag +CLOUDY_VERBOSE=1 fab -H admin@10.10.10.198:22022 db.pg.status +fab -H admin@10.10.10.198:22022 --debug fw.status +``` + +### Other High-Level Recipes +```bash +# Redis cache server setup +fab -H root@server.com recipe.redis-install --cfg-file="./.cloudy.redis" + +# Django web server setup +fab -H root@web.com recipe.web-install --cfg-file="./.cloudy.web" + +# PostgreSQL + PostGIS database setup +fab -H root@db.com recipe.psql-install --cfg-file="./.cloudy.db" + +# Use environment variable to see detailed installation progress +CLOUDY_VERBOSE=1 fab -H root@server.com recipe.redis-install --cfg-file="./.cloudy.redis" +``` + +### Database Management +```bash +# PostgreSQL operations +fab -H root@db.com db.pg.create-user --username=myuser --password=secure123 +fab -H root@db.com db.pg.create-db --database=myapp --owner=myuser +fab -H root@db.com db.pg.grant-privs --database=myapp --username=myuser + +# MySQL operations +fab -H root@db.com db.my.install +fab -H root@db.com db.my.create-db --database=wordpress +fab -H root@db.com db.my.create-user --username=wpuser --password=pass123 +``` + +### System Administration +```bash +# For non-root users, export sudo password first +export INVOKE_SUDO_PASSWORD=your_sudo_password + +# System setup and updates +fab -H root@server.com sys.init +fab -H root@server.com sys.update +fab -H root@server.com sys.hostname --hostname=myserver.example.com + +# User management +fab -H root@server.com sys.add-user --username=deploy +fab -H root@server.com sys.add-sudoer --username=deploy + +# SSH configuration +fab -H root@server.com sys.ssh-port --port=2222 +fab -H root@server.com sys.ssh-disable-root + +# Use --echo to see exact commands being executed +fab -H root@server.com --echo sys.hostname --hostname=myserver.example.com + +# System status checks (always show output) +fab -H admin@server:22022 sys.services # Shows service status +fab -H admin@server:22022 sys.memory-usage # Shows memory info +``` + +### Firewall & Security +```bash +# Firewall setup +fab -H root@server.com fw.install +fab -H root@server.com fw.secure-server --ssh-port=2222 +fab -H root@server.com fw.allow-http +fab -H root@server.com fw.allow-https +fab -H root@server.com fw.allow-postgresql +``` + +### Web Server Setup +```bash +# Nginx setup +fab -H root@web.com web.nginx.install +fab -H root@web.com web.nginx.setup-domain --domain=example.com --proto=https + +# Apache setup +fab -H root@web.com web.apache.install +fab -H root@web.com web.apache.configure-domain --domain=mysite.com + +# Site management +fab -H root@web.com web.site.create-site-dir --domain=example.com +fab -H root@web.com web.site.create-venv --domain=example.com +``` + +### Cloud Management (AWS) +```bash +# EC2 instance management +fab aws.list-nodes +fab aws.create-node --name=web-server --image=ami-12345 --size=t3.micro +fab aws.get-node --name=web-server +fab aws.destroy-node --name=web-server +``` + +### Service Management +```bash +# Docker setup +fab -H root@server.com services.docker.install +fab -H root@server.com services.docker.add-user --username=deploy + +# Redis configuration +fab -H root@server.com services.cache.install +fab -H root@server.com services.cache.configure +fab -H root@server.com services.cache.password --password=redis123 + +# VPN setup +fab -H root@vpn.com services.vpn.docker-install +fab -H root@vpn.com services.vpn.create-client --name=user1 +``` + +--- + +## Command Structure + +Python Cloudy organizes commands into intuitive hierarchical namespaces: + +- **`recipe.*`** - High-level deployment recipes (7 commands) +- **`sys.*`** - System administration (31 commands) +- **`db.*`** - Database management (31 commands) + - `db.pg.*` - PostgreSQL (17 commands) + - `db.my.*` - MySQL (7 commands) + - `db.pgb.*` - PgBouncer (3 commands) + - `db.pgp.*` - PgPool (2 commands) + - `db.gis.*` - PostGIS (4 commands) +- **`web.*`** - Web servers (13 commands) +- **`fw.*`** - Firewall (9 commands) +- **`services.*`** - Service management (17 commands) +- **`aws.*`** - Cloud management (16 commands) + +### Global Flags (Available for any command) + +- **`--debug, -d`** - Enable Fabric debug mode + all output +- **`--echo, -e`** - Echo commands before running (Fabric built-in) +- **`CLOUDY_VERBOSE=1`** - Environment variable for verbose output + +### List All Commands +```bash +fab -l # Show all available commands +fab -l | grep "recipe\." # Show only recipe commands +fab -l | grep "db\.pg\." # Show only PostgreSQL commands + +# Get help for Python Cloudy features +fab help # Show comprehensive help with examples +``` + +--- + +## Configuration + +Python Cloudy uses hierarchical configuration files with INI format: + +### Configuration Precedence (lowest to highest) +1. `cloudy/cfg/defaults.cfg` - Built-in defaults +2. `~/.cloudy` - User home directory config +3. `./.cloudy` - Current working directory config +4. `--cfg-file` - Explicitly passed files + +### Example Configuration +```ini +[COMMON] +git-user-full-name = John Doe +git-user-email = john@example.com +timezone = America/New_York +admin-user = admin +hostname = my-server +python-version = 3.11 + +[WEBSERVER] +webserver = gunicorn +webserver-port = 8080 +domain-name = example.com + +[DBSERVER] +pg-version = 17 +db-host = localhost +db-port = 5432 +``` + +### Using Multiple Configs +```bash +fab -H root@server.com recipe.gen-install --cfg-file="./.cloudy.base,./.cloudy.production" +``` + +--- + +## Development + +### Running Tests +```bash +./test.sh # Run minimal test suite +python tests/test_runner.py # Run tests directly +``` + +### Code Quality +```bash +./lint.sh # Run linting (Black, isort, flake8, mypy) +``` + +### Environment Setup +```bash +source .venv/bin/activate # Always activate before development ``` --- diff --git a/bootstrap.sh b/bootstrap.sh index e8db42d..7d4121d 100755 --- a/bootstrap.sh +++ b/bootstrap.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # Python Cloudy Bootstrap Script # Sets up Python 3.11.9 via pyenv and creates virtual environment diff --git a/cloudy/__init__.py b/cloudy/__init__.py index d2e2697..04a1e5f 100644 --- a/cloudy/__init__.py +++ b/cloudy/__init__.py @@ -1,2 +1,2 @@ -__version__ = '0.0.5' -__author__ = 'Val Neekman' +__version__ = "0.0.5" +__author__ = "Val Neekman" diff --git a/cloudy/aws/ec2.py b/cloudy/aws/ec2.py index a5b511b..4777876 100644 --- a/cloudy/aws/ec2.py +++ b/cloudy/aws/ec2.py @@ -2,46 +2,56 @@ import time from fabric import task -from cloudy.util.context import Context -from cloudy.util.conf import CloudyConfig -from libcloud.compute.types import Provider, NodeState -from libcloud.compute.providers import get_driver from libcloud.compute.base import Node +from libcloud.compute.providers import get_driver +from libcloud.compute.types import NodeState, Provider + +from cloudy.util.conf import CloudyConfig +from cloudy.util.context import Context + def util_print_node(node: Node | None) -> None: if node: - print(', '.join([ - 'name: ' + node.name, - 'status: ' + util_get_state2string(node.state), - 'image: ' + node.extra.get('imageId', ''), - 'zone: ' + node.extra.get('availability', ''), - 'key: ' + node.extra.get('keyname', ''), - 'size: ' + node.extra.get('instancetype', ''), - 'pub ip: ' + str(node.public_ips) - ]), file=sys.stderr) + print( + ", ".join( + [ + "name: " + node.name, + "status: " + util_get_state2string(node.state), + "image: " + node.extra.get("imageId", ""), + "zone: " + node.extra.get("availability", ""), + "key: " + node.extra.get("keyname", ""), + "size: " + node.extra.get("instancetype", ""), + "pub ip: " + str(node.public_ips), + ] + ), + file=sys.stderr, + ) + def util_get_state2string(state: NodeState) -> str: compute_state_map = { - NodeState.RUNNING: 'running', - NodeState.REBOOTING: 'rebooting', - NodeState.TERMINATED: 'terminated', - NodeState.PENDING: 'pending', - NodeState.UNKNOWN: 'unknown', + NodeState.RUNNING: "running", + NodeState.REBOOTING: "rebooting", + NodeState.TERMINATED: "terminated", + NodeState.PENDING: "pending", + NodeState.UNKNOWN: "unknown", } - return compute_state_map.get(state, 'unknown') + return compute_state_map.get(state, "unknown") + def util_get_connection(c: Context): try: cfg = CloudyConfig() - ACCESS_ID = (cfg.cfg_grid['AWS']['access_id'] or '').strip() - SECRET_KEY = (cfg.cfg_grid['AWS']['secret_key'] or '').strip() + ACCESS_ID = (cfg.cfg_grid["AWS"]["access_id"] or "").strip() + SECRET_KEY = (cfg.cfg_grid["AWS"]["secret_key"] or "").strip() except Exception: - c.abort('Unable to read ACCESS_ID, SECRET_KEY') + c.abort("Unable to read ACCESS_ID, SECRET_KEY") Driver = get_driver(Provider.EC2) conn = Driver(ACCESS_ID, SECRET_KEY) return conn + def util_wait_till_node(c: Context, name: str, state: NodeState, timeout: int = 10) -> Node | None: node = None elapsed = 0 @@ -54,12 +64,15 @@ def util_wait_till_node(c: Context, name: str, state: NodeState, timeout: int = elapsed += frequency return node + def util_wait_till_node_destroyed(c: Context, name: str, timeout: int = 15) -> Node | None: return util_wait_till_node(c, name, NodeState.TERMINATED, timeout) + def util_wait_till_node_running(c: Context, name: str, timeout: int = 15) -> Node | None: return util_wait_till_node(c, name, NodeState.RUNNING, timeout) + @task @Context.wrap_context def util_list_instances(c: Context): @@ -68,85 +81,113 @@ def util_list_instances(c: Context): print(nodes, file=sys.stderr) return nodes + @task @Context.wrap_context def aws_list_sizes(c: Context): - """ List node sizes - Ex: (cmd)""" + """List node sizes - Ex: (cmd)""" conn = util_get_connection(c) sizes = sorted([i for i in conn.list_sizes()], key=lambda x: x.ram) for i in sizes: - print(' - '.join([i.id, str(i.ram), str(i.price)]), file=sys.stderr) + print(" - ".join([i.id, str(i.ram), str(i.price)]), file=sys.stderr) + @task @Context.wrap_context def aws_get_size(c: Context, size: str) -> object | None: - """ Get Node Size - Ex: (cmd:)""" + """Get Node Size - Ex: (cmd:)""" conn = util_get_connection(c) sizes = [i for i in conn.list_sizes()] if size: for i in sizes: if str(i.ram) == size or i.id == size: - print(' - '.join([i.id, str(i.ram), str(i.price)]), file=sys.stderr) + print(" - ".join([i.id, str(i.ram), str(i.price)]), file=sys.stderr) return i return None + @task @Context.wrap_context def aws_list_images(c: Context): - """ List available images - Ex: (cmd)""" + """List available images - Ex: (cmd)""" conn = util_get_connection(c) images = sorted([i for i in conn.list_images()], key=lambda x: x.id) for i in images: - print(' - '.join([i.id, i.name]), file=sys.stderr) + print(" - ".join([i.id, i.name]), file=sys.stderr) + @task @Context.wrap_context def aws_get_image(c: Context, name: str) -> object | None: - """ Confirm if a node exists - Ex: (cmd:)""" + """Confirm if a node exists - Ex: (cmd:)""" conn = util_get_connection(c) images = [i for i in conn.list_images()] if name: for i in images: if name == i.id: - print(' - '.join([i.id, i.name]), file=sys.stderr) + print(" - ".join([i.id, i.name]), file=sys.stderr) return i return None + @task @Context.wrap_context def aws_list_locations(c: Context): - """ List available locations - Ex: (cmd) """ + """List available locations - Ex: (cmd)""" conn = util_get_connection(c) locations = sorted([i for i in conn.list_locations()], key=lambda x: x.id) for i in locations: - print(' - '.join([getattr(i, "availability_zone", type('', (), {"name": ""})()).name, i.id, i.name, i.country]), file=sys.stderr) + print( + " - ".join( + [ + getattr(i, "availability_zone", type("", (), {"name": ""})()).name, + i.id, + i.name, + i.country, + ] + ), + file=sys.stderr, + ) + @task @Context.wrap_context def aws_get_location(c: Context, name: str) -> object | None: - """ Confirm if a location exists - Ex: (cmd:)""" + """Confirm if a location exists - Ex: (cmd:)""" conn = util_get_connection(c) locations = sorted([i for i in conn.list_locations()], key=lambda x: x.id) if name: for i in locations: - if getattr(i, "availability_zone", type('', (), {"name": ""})()).name == name: - print(' - '.join([getattr(i, "availability_zone", type('', (), {"name": ""})()).name, i.id, i.name, i.country]), file=sys.stderr) + if getattr(i, "availability_zone", type("", (), {"name": ""})()).name == name: + print( + " - ".join( + [ + getattr(i, "availability_zone", type("", (), {"name": ""})()).name, + i.id, + i.name, + i.country, + ] + ), + file=sys.stderr, + ) return i return None + @task @Context.wrap_context def aws_list_security_groups(c: Context): - """ List available security groups - Ex: (cmd)""" + """List available security groups - Ex: (cmd)""" conn = util_get_connection(c) groups = sorted([i for i in conn.ex_list_security_groups()]) for i in groups: print(i, file=sys.stderr) + @task @Context.wrap_context def aws_security_group_found(c: Context, name: str) -> bool: - """ Confirm if a security group exists - Ex: (cmd:) """ + """Confirm if a security group exists - Ex: (cmd:)""" conn = util_get_connection(c) groups = sorted([i for i in conn.ex_list_security_groups()]) if name: @@ -156,19 +197,21 @@ def aws_security_group_found(c: Context, name: str) -> bool: return True return False + @task @Context.wrap_context def aws_list_keypairs(c: Context): - """ List all available keypairs - Ex: (cmd)""" + """List all available keypairs - Ex: (cmd)""" conn = util_get_connection(c) nodes = sorted([i for i in conn.ex_describe_all_keypairs()]) for i in nodes: print(i, file=sys.stderr) + @task @Context.wrap_context def aws_keypair_found(c: Context, name: str) -> bool: - """ Confirm if a keypair exists - Ex: (cmd:) """ + """Confirm if a keypair exists - Ex: (cmd:)""" conn = util_get_connection(c) keys = sorted([i for i in conn.ex_describe_all_keypairs()]) for i in keys: @@ -177,19 +220,21 @@ def aws_keypair_found(c: Context, name: str) -> bool: return True return False + @task @Context.wrap_context def aws_list_nodes(c: Context): - """ List all available computing nodes - Ex: (cmd)""" + """List all available computing nodes - Ex: (cmd)""" conn = util_get_connection(c) nodes = sorted([i for i in conn.list_nodes()], key=lambda x: x.name) for i in nodes: util_print_node(i) + @task @Context.wrap_context def aws_get_node(c: Context, name: str) -> Node | None: - """ Confirm if a computing node exists - Ex: (cmd:) """ + """Confirm if a computing node exists - Ex: (cmd:)""" conn = util_get_connection(c) nodes = sorted([i for i in conn.list_nodes()], key=lambda x: x.name) for i in nodes: @@ -198,89 +243,91 @@ def aws_get_node(c: Context, name: str) -> Node | None: return i return None + @task @Context.wrap_context def aws_create_node( - c: Context, - name: str, - image: str, - size: str, - security: str, - key: str, - timeout: int = 30 + c: Context, name: str, image: str, size: str, security: str, key: str, timeout: int = 30 ) -> Node | None: - """ Create a node - Ex: (cmd:,,,[security],[key],[timeout]) """ + """Create a node - Ex: (cmd:,,,[security],[key],[timeout])""" conn = util_get_connection(c) if aws_get_node(c, name): - c.abort(f'Node already exists ({name})') + c.abort(f"Node already exists ({name})") size_obj = aws_get_size(c, size) if not size_obj: - c.abort(f'Invalid size ({size})') + c.abort(f"Invalid size ({size})") if not aws_security_group_found(c, security): - c.abort(f'Invalid security group ({security})') + c.abort(f"Invalid security group ({security})") if not aws_keypair_found(c, key): - c.abort(f'Invalid key ({key})') + c.abort(f"Invalid key ({key})") image_obj = aws_get_image(c, image) if not image_obj: - c.abort(f'Invalid image ({image})') + c.abort(f"Invalid image ({image})") - node = conn.create_node(name=name, image=image_obj, size=size_obj, ex_securitygroup=security, ex_keyname=key) + node = conn.create_node( + name=name, image=image_obj, size=size_obj, ex_securitygroup=security, ex_keyname=key + ) if not node: - c.abort(f'Failed to create node (name:{name}, image:{image}, size:{size})') + c.abort(f"Failed to create node (name:{name}, image:{image}, size:{size})") node = util_wait_till_node_running(c, name) util_print_node(node) return node + @task @Context.wrap_context def aws_destroy_node(c: Context, name: str, timeout: int = 30) -> None: - """ Destroy a computing node - Ex (cmd:)""" + """Destroy a computing node - Ex (cmd:)""" node = aws_get_node(c, name) if not node: - c.abort(f'Node does not exist or terminated ({name})') + c.abort(f"Node does not exist or terminated ({name})") if node.destroy(): node = util_wait_till_node_destroyed(c, name, timeout) if node: - print(f'Node is destroyed ({name})', file=sys.stderr) + print(f"Node is destroyed ({name})", file=sys.stderr) else: - print(f'Node is being destroyed ({name})', file=sys.stderr) + print(f"Node is being destroyed ({name})", file=sys.stderr) else: - c.abort(f'Failed to destroy node ({name})') + c.abort(f"Failed to destroy node ({name})") + @task @Context.wrap_context -def aws_create_volume(c: Context, name: str, size: int, location: str, snapshot: str = None) -> object: - """ Create a volume of a given size in a given zone - Ex: (cmd:,,[location],[snapshot])""" +def aws_create_volume( + c: Context, name: str, size: int, location: str, snapshot: str = None +) -> object: + """Create a volume of a given size in a given zone. + + Ex: (cmd:,,[location],[snapshot]) + """ conn = util_get_connection(c) loc = aws_get_location(c, location) if not loc: - c.abort(f'Location does not exist ({location})') + c.abort(f"Location does not exist ({location})") volume = conn.create_volume(name=name, size=size, location=loc, snapshot=snapshot) return volume + @task @Context.wrap_context def aws_list_volumes(c: Context) -> None: from boto.ec2.connection import EC2Connection - from boto.utils import get_instance_metadata + try: cfg = CloudyConfig() - ACCESS_ID = (cfg.cfg_grid['AWS']['access_id'] or '').strip() - SECRET_KEY = (cfg.cfg_grid['AWS']['secret_key'] or '').strip() + ACCESS_ID = (cfg.cfg_grid["AWS"]["access_id"] or "").strip() + SECRET_KEY = (cfg.cfg_grid["AWS"]["secret_key"] or "").strip() except Exception: - c.abort('Unable to read ACCESS_ID, SECRET_KEY') + c.abort("Unable to read ACCESS_ID, SECRET_KEY") conn = EC2Connection(ACCESS_ID, SECRET_KEY) volumes = [v for v in conn.get_all_volumes()] print(volumes, file=sys.stderr) - - - diff --git a/cloudy/db/__init__.py b/cloudy/db/__init__.py index 89140c6..6106393 100644 --- a/cloudy/db/__init__.py +++ b/cloudy/db/__init__.py @@ -1,12 +1,12 @@ +import importlib import os import re import types -import importlib -PACKAGE = 'cloudy.db' +PACKAGE = "cloudy.db" MODULE_RE = r"^[^.].*\.py$" -PREFIX = ['db_'] -SKIP = {'__init__.py'} +PREFIX = ["db_"] +SKIP = {"__init__.py"} functions = [] module_dir = os.path.dirname(__file__) @@ -15,7 +15,7 @@ if fname in SKIP or not re.match(MODULE_RE, fname): continue mod_name = fname[:-3] - module = importlib.import_module(f'{PACKAGE}.{mod_name}') + module = importlib.import_module(f"{PACKAGE}.{mod_name}") for name in dir(module): if any(name.startswith(p) for p in PREFIX): item = getattr(module, name) diff --git a/cloudy/db/mysql.py b/cloudy/db/mysql.py index 340dd6e..6c154b8 100644 --- a/cloudy/db/mysql.py +++ b/cloudy/db/mysql.py @@ -1,49 +1,54 @@ import re import sys -from operator import itemgetter + from fabric import task -from cloudy.util.context import Context + from cloudy.sys.etc import sys_etc_git_commit +from cloudy.util.context import Context @task @Context.wrap_context def db_mysql_latest_version(c: Context) -> str: """Get the latest available MySQL version.""" - latest_version: str = '' - result = c.run('apt-cache search --names-only mysql-client', hide=True, warn=True) - version_re = re.compile(r'mysql-client-([0-9.]+)\s-') - versions = [ver.group(1) for line in result.stdout.split('\n') if (ver := version_re.search(line.lower()))] + latest_version: str = "" + result = c.run("apt-cache search --names-only mysql-client", hide=True, warn=True) + version_re = re.compile(r"mysql-client-([0-9.]+)\s-") + versions = [ + ver.group(1) + for line in result.stdout.split("\n") + if (ver := version_re.search(line.lower())) + ] versions.sort(reverse=True) try: latest_version = versions[0] except IndexError: pass - print(f'Latest available mysql is: [{latest_version}]', file=sys.stderr) + print(f"Latest available mysql is: [{latest_version}]", file=sys.stderr) return latest_version @task @Context.wrap_context -def db_mysql_server_install(c: Context, version: str = '') -> None: +def db_mysql_server_install(c: Context, version: str = "") -> None: """Install MySQL Server.""" if not version: version = db_mysql_latest_version(c) - requirements = f'mysql-server-{version}' - c.sudo(f'DEBIAN_FRONTEND=noninteractive apt -y install {requirements}') - sys_etc_git_commit(c, f'Installed MySQL Server ({version})') + requirements = f"mysql-server-{version}" + c.sudo(f"DEBIAN_FRONTEND=noninteractive apt -y install {requirements}") + sys_etc_git_commit(c, f"Installed MySQL Server ({version})") @task @Context.wrap_context -def db_mysql_client_install(c: Context, version: str = '') -> None: +def db_mysql_client_install(c: Context, version: str = "") -> None: """Install MySQL Client.""" if not version: version = db_mysql_latest_version(c) - requirements = f'mysql-client-{version}' - c.sudo(f'DEBIAN_FRONTEND=noninteractive apt -y install {requirements}') - sys_etc_git_commit(c, f'Installed MySQL Client ({version})') + requirements = f"mysql-client-{version}" + c.sudo(f"DEBIAN_FRONTEND=noninteractive apt -y install {requirements}") + sys_etc_git_commit(c, f"Installed MySQL Client ({version})") @task @@ -51,31 +56,38 @@ def db_mysql_client_install(c: Context, version: str = '') -> None: def db_mysql_set_root_password(c: Context, password: str) -> None: """Set MySQL root password.""" if not password: - print('Password required for mysql root', file=sys.stderr) + print("Password required for mysql root", file=sys.stderr) return - c.sudo(f'mysqladmin -u root password {password}') - sys_etc_git_commit(c, 'Set MySQL Root Password') + c.sudo(f"mysqladmin -u root password {password}") + sys_etc_git_commit(c, "Set MySQL Root Password") @task @Context.wrap_context def db_mysql_create_database(c: Context, root_pass: str, db_name: str) -> None: """Create a new MySQL database.""" - c.sudo(f'echo "CREATE DATABASE {db_name} CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;" | sudo mysql -u root -p{root_pass}') + c.sudo( + f'echo "CREATE DATABASE {db_name} CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;" | ' + f"sudo mysql -u root -p{root_pass}" + ) @task @Context.wrap_context def db_mysql_create_user(c: Context, root_pass: str, user: str, user_pass: str) -> None: """Create a new MySQL user.""" - c.sudo(f'echo "CREATE USER \'{user}\'@\'localhost\' IDENTIFIED BY \'{user_pass}\';" | sudo mysql -u root -p{root_pass}') + c.sudo( + f"echo \"CREATE USER '{user}'@'localhost' IDENTIFIED BY '{user_pass}';\" | " + f"sudo mysql -u root -p{root_pass}" + ) @task @Context.wrap_context def db_mysql_grant_user(c: Context, root_pass: str, user: str, database: str) -> None: """Grant all privileges on a database to a user.""" - c.sudo(f'echo "GRANT ALL PRIVILEGES ON {database}.* TO \'{user}\'@\'localhost\';" | sudo mysql -u root -p{root_pass}') + c.sudo( + f"echo \"GRANT ALL PRIVILEGES ON {database}.* TO '{user}'@'localhost';\" | " + f"sudo mysql -u root -p{root_pass}" + ) c.sudo(f'echo "FLUSH PRIVILEGES;" | sudo mysql -u root -p{root_pass}') - - diff --git a/cloudy/db/pgbouncer.py b/cloudy/db/pgbouncer.py index 771791d..e38b412 100644 --- a/cloudy/db/pgbouncer.py +++ b/cloudy/db/pgbouncer.py @@ -1,54 +1,48 @@ import os + from fabric import task -from cloudy.util.context import Context + from cloudy.sys.etc import sys_etc_git_commit +from cloudy.util.context import Context @task @Context.wrap_context def db_pgbouncer_install(c: Context) -> None: """Install pgbouncer.""" - c.sudo('apt -y install pgbouncer') - sys_etc_git_commit(c, 'Installed pgbouncer') + c.sudo("apt -y install pgbouncer") + sys_etc_git_commit(c, "Installed pgbouncer") @task @Context.wrap_context -def db_pgbouncer_configure( - c: Context, dbhost: str = '', dbport: int = 5432 -) -> None: +def db_pgbouncer_configure(c: Context, dbhost: str = "", dbport: int = 5432) -> None: """Configure pgbouncer with given dbhost and dbport.""" - cfgdir = os.path.join(os.path.dirname(__file__), '../cfg') - localcfg = os.path.expanduser(os.path.join(cfgdir, 'pgbouncer/pgbouncer.ini')) - remotecfg = '/etc/pgbouncer/pgbouncer.ini' - c.sudo(f'rm -rf {remotecfg}') - c.put(localcfg, remotecfg) + cfgdir = os.path.join(os.path.dirname(__file__), "../cfg") + localcfg = os.path.expanduser(os.path.join(cfgdir, "pgbouncer/pgbouncer.ini")) + remotecfg = "/etc/pgbouncer/pgbouncer.ini" + c.sudo(f"rm -rf {remotecfg}") + c.put(localcfg, "/tmp/pgbouncer.ini") + c.sudo(f"mv /tmp/pgbouncer.ini {remotecfg}") c.sudo(f'sed -i "s/dbport/{dbport}/g" {remotecfg}') if dbhost: c.sudo(f'sed -i "s/dbhost/{dbhost}/g" {remotecfg}') - localdefault = os.path.expanduser(os.path.join(cfgdir, 'pgbouncer/default-pgbouncer')) - remotedefault = '/etc/default/pgbouncer' - c.sudo(f'rm -rf {remotedefault}') - c.put(localdefault, remotedefault) - sys_etc_git_commit(c, 'Configured pgbouncer') + localdefault = os.path.expanduser(os.path.join(cfgdir, "pgbouncer/default-pgbouncer")) + remotedefault = "/etc/default/pgbouncer" + c.sudo(f"rm -rf {remotedefault}") + c.put(localdefault, "/tmp/default-pgbouncer") + c.sudo(f"mv /tmp/default-pgbouncer {remotedefault}") + sys_etc_git_commit(c, "Configured pgbouncer") @task @Context.wrap_context -def db_pgbouncer_set_user_password( - c: Context, user: str, password: str -) -> None: +def db_pgbouncer_set_user_password(c: Context, user: str, password: str) -> None: """Add user:pass to auth_user in pgbouncer userlist.txt.""" - userlist = '/etc/pgbouncer/userlist.txt' - c.sudo(f'touch {userlist}') - c.sudo(f'echo \'"{user}" "{password}"\' >> {userlist}') - c.sudo(f'chown postgres:postgres {userlist}') - c.sudo(f'chmod 600 {userlist}') - - - - - - - + userlist = "/etc/pgbouncer/userlist.txt" + c.sudo(f"touch {userlist}") + c.run(f'echo \\"{user}\\" \\"{password}\\" > /tmp/pgb_user') + c.sudo(f"cat /tmp/pgb_user >> {userlist} && rm /tmp/pgb_user") + c.sudo(f"chown postgres:postgres {userlist}") + c.sudo(f"chmod 600 {userlist}") diff --git a/cloudy/db/pgis.py b/cloudy/db/pgis.py index 6c70cd5..a73a731 100644 --- a/cloudy/db/pgis.py +++ b/cloudy/db/pgis.py @@ -1,92 +1,103 @@ import re import sys -from operator import itemgetter + from fabric import task -from cloudy.util.context import Context + from cloudy.db.psql import db_psql_default_installed_version -from cloudy.sys.etc import sys_etc_git_commit from cloudy.sys.core import sys_start_service +from cloudy.sys.etc import sys_etc_git_commit +from cloudy.util.context import Context + @task @Context.wrap_context -def db_pgis_install(c: Context, psql_version: str = '', pgis_version: str = '') -> None: +def db_pgis_install(c: Context, psql_version: str = "", pgis_version: str = "") -> None: """Install postgis for a given postgres version.""" if not psql_version: psql_version = db_psql_default_installed_version(c) if not pgis_version: pgis_version = db_pgis_get_latest_version(c, psql_version) - - requirements = ' '.join([ - f'postgresql-{psql_version}-postgis-{pgis_version}', - 'postgis', - 'libproj-dev', - 'gdal-bin', - 'binutils', - 'libgeos-c1v5', - 'libgeos-dev', - 'libgdal-dev', - 'libgeoip-dev', - 'libpq-dev', - 'libxml2', - 'libxml2-dev', - 'libxml2-utils', - 'libjson-c-dev', - 'xsltproc', - 'docbook-xsl', - 'docbook-mathml', - ]) - c.sudo('apt -y purge postgis') - c.sudo(f'apt -y install {requirements}') - sys_start_service(c, 'postgresql') - sys_etc_git_commit(c, f'Installed postgis for psql ({psql_version})') + + requirements = " ".join( + [ + f"postgresql-{psql_version}-postgis-{pgis_version}", + "postgis", + "libproj-dev", + "gdal-bin", + "binutils", + "libgeos-c1v5", + "libgeos-dev", + "libgdal-dev", + "libgeoip-dev", + "libpq-dev", + "libxml2", + "libxml2-dev", + "libxml2-utils", + "libjson-c-dev", + "xsltproc", + "docbook-xsl", + "docbook-mathml", + ] + ) + c.sudo("apt -y purge postgis") + c.sudo(f"apt -y install {requirements}") + sys_start_service(c, "postgresql") + sys_etc_git_commit(c, f"Installed postgis for psql ({psql_version})") + @task @Context.wrap_context -def db_pgis_get_latest_version(c: Context, pg_version: str = '') -> str: +def db_pgis_get_latest_version(c: Context, pg_version: str = "") -> str: """Return the latest available postgis version for pg_version.""" if not pg_version: pg_version = db_psql_default_installed_version(c) - latest_version: str = '' - result = c.run('apt-cache search --names-only postgis', hide=True, warn=True) - version_re = re.compile(r'postgresql-[0-9.]+-postgis-([0-9.]+)\s-') - versions = [ver.group(1) for line in result.stdout.split('\n') if (ver := version_re.search(line.lower()))] + latest_version: str = "" + result = c.run("apt-cache search --names-only postgis", hide=True, warn=True) + version_re = re.compile(r"postgresql-[0-9.]+-postgis-([0-9.]+)\s-") + versions = [ + ver.group(1) + for line in result.stdout.split("\n") + if (ver := version_re.search(line.lower())) + ] versions.sort(reverse=True) try: latest_version = versions[0] except IndexError: pass - print(f'Latest available postgis is: [{latest_version}]', file=sys.stderr) + print(f"Latest available postgis is: [{latest_version}]", file=sys.stderr) return latest_version + @task @Context.wrap_context def db_pgis_get_latest_libgeos_version(c: Context) -> str: """Return the latest libgeos version.""" - latest_version: str = '' - result = c.run('apt-cache search --names-only libgeos', hide=True, warn=True) - + latest_version: str = "" + result = c.run("apt-cache search --names-only libgeos", hide=True, warn=True) + # Updated regex to match common libgeos package patterns - version_re = re.compile(r'libgeos-?([0-9]+(?:\.[0-9]+)*)') - + version_re = re.compile(r"libgeos-?([0-9]+(?:\.[0-9]+)*)") + versions = [] - for line in result.stdout.split('\n'): - if (ver := version_re.search(line.lower())): + for line in result.stdout.split("\n"): + if ver := version_re.search(line.lower()): versions.append(ver.group(1)) - + # Sort versions properly (semantic versioning) if versions: - versions.sort(key=lambda x: [int(i) for i in x.split('.')], reverse=True) + versions.sort(key=lambda x: [int(i) for i in x.split(".")], reverse=True) latest_version = versions[0] - print(f'Latest available libgeos is: [{latest_version}]', file=sys.stderr) + print(f"Latest available libgeos is: [{latest_version}]", file=sys.stderr) return latest_version + @task @Context.wrap_context def db_pgis_configure( - c: Context, pg_version: str = '', pgis_version: str = '', legacy: bool = False + c: Context, pg_version: str = "", pgis_version: str = "", legacy: bool = False ) -> None: """Configure postgis template.""" if not pg_version: @@ -95,31 +106,37 @@ def db_pgis_configure( pgis_version = db_pgis_get_latest_version(c, pg_version) # Allows non-superusers the ability to create from this template - c.sudo('sudo -u postgres psql -d postgres -c "UPDATE pg_database SET datistemplate=\'false\' WHERE datname=\'template_postgis\';"') + c.sudo( + "sudo -u postgres psql -d postgres -c " + "\"UPDATE pg_database SET datistemplate='false' WHERE datname='template_postgis';\"" + ) c.sudo('sudo -u postgres psql -d postgres -c "DROP DATABASE template_postgis;"', warn=True) - c.sudo('sudo -u postgres createdb -E UTF8 template_postgis') + c.sudo("sudo -u postgres createdb -E UTF8 template_postgis") c.sudo('sudo -u postgres psql -d template_postgis -c "CREATE EXTENSION postgis;"', warn=True) - c.sudo('sudo -u postgres psql -d template_postgis -c "CREATE EXTENSION postgis_topology;"', warn=True) + c.sudo( + 'sudo -u postgres psql -d template_postgis -c "CREATE EXTENSION postgis_topology;"', + warn=True, + ) if legacy: - postgis_path = f'/usr/share/postgresql/{pg_version}/contrib/postgis-{pgis_version}' - c.sudo(f'sudo -u postgres psql -d template_postgis -f {postgis_path}/legacy.sql') + postgis_path = f"/usr/share/postgresql/{pg_version}/contrib/postgis-{pgis_version}" + c.sudo(f"sudo -u postgres psql -d template_postgis -f {postgis_path}/legacy.sql") # Enabling users to alter spatial tables. - c.sudo('sudo -u postgres psql -d template_postgis -c "GRANT ALL ON geometry_columns TO PUBLIC;"') + c.sudo( + 'sudo -u postgres psql -d template_postgis -c "GRANT ALL ON geometry_columns TO PUBLIC;"' + ) c.sudo('sudo -u postgres psql -d template_postgis -c "GRANT ALL ON spatial_ref_sys TO PUBLIC;"') - c.sudo('sudo -u postgres psql -d template_postgis -c "GRANT ALL ON geography_columns TO PUBLIC;"') + c.sudo( + 'sudo -u postgres psql -d template_postgis -c "GRANT ALL ON geography_columns TO PUBLIC;"' + ) + + sys_etc_git_commit(c, f"Configured postgis ({pgis_version}) for psql ({pg_version})") - sys_etc_git_commit(c, f'Configured postgis ({pgis_version}) for psql ({pg_version})') @task @Context.wrap_context def db_pgis_get_database_gis_info(c: Context, dbname: str) -> None: """Return the postgis version of a postgis database.""" c.sudo(f'sudo -u postgres psql -d {dbname} -c "SELECT PostGIS_Version();"') - - - - - diff --git a/cloudy/db/pgpool.py b/cloudy/db/pgpool.py index 7c919f7..23b3c7f 100644 --- a/cloudy/db/pgpool.py +++ b/cloudy/db/pgpool.py @@ -1,41 +1,40 @@ import os + from fabric import task -from cloudy.util.context import Context -from cloudy.sys.etc import sys_etc_git_commit + from cloudy.sys.core import sys_restart_service +from cloudy.sys.etc import sys_etc_git_commit +from cloudy.util.context import Context @task @Context.wrap_context def db_pgpool2_install(c: Context) -> None: """Install pgpool2.""" - c.sudo('apt -y install pgpool2') - sys_etc_git_commit(c, 'Installed pgpool2') + c.sudo("apt -y install pgpool2") + sys_etc_git_commit(c, "Installed pgpool2") @task @Context.wrap_context def db_pgpool2_configure( - c: Context, dbhost: str = '', dbport: str = '5432', localport: str = '5432' + c: Context, dbhost: str = "", dbport: str = "5432", localport: str = "5432" ) -> None: """Configure pgpool2 with given dbhost, dbport, and localport.""" - cfgdir = os.path.join(os.path.dirname(__file__), '../cfg') - localcfg = os.path.expanduser(os.path.join(cfgdir, 'pgpool2/pgpool.conf')) - remotecfg = '/etc/pgpool2/pgpool.conf' - c.sudo(f'rm -rf {remotecfg}') - c.put(localcfg, remotecfg) + cfgdir = os.path.join(os.path.dirname(__file__), "../cfg") + localcfg = os.path.expanduser(os.path.join(cfgdir, "pgpool2/pgpool.conf")) + remotecfg = "/etc/pgpool2/pgpool.conf" + c.sudo(f"rm -rf {remotecfg}") + c.put(localcfg, "/tmp/pgpool.conf") + c.sudo(f"mv /tmp/pgpool.conf {remotecfg}") c.sudo(f'sed -i "s/dbhost/{dbhost}/g" {remotecfg}') c.sudo(f'sed -i "s/dbport/{dbport}/g" {remotecfg}') c.sudo(f'sed -i "s/localport/{localport}/g" {remotecfg}') - localdefault = os.path.expanduser(os.path.join(cfgdir, 'pgpool2/default-pgpool2')) - remotedefault = '/etc/default/pgpool2' - c.sudo(f'rm -rf {remotedefault}') - c.put(localdefault, remotedefault) - sys_etc_git_commit(c, 'Configured pgpool2') - sys_restart_service(c, 'pgpool2') - - - - - + localdefault = os.path.expanduser(os.path.join(cfgdir, "pgpool2/default-pgpool2")) + remotedefault = "/etc/default/pgpool2" + c.sudo(f"rm -rf {remotedefault}") + c.put(localdefault, "/tmp/default-pgpool2") + c.sudo(f"mv /tmp/default-pgpool2 {remotedefault}") + sys_etc_git_commit(c, "Configured pgpool2") + sys_restart_service(c, "pgpool2") diff --git a/cloudy/db/psql.py b/cloudy/db/psql.py index 800d5e8..3e3c332 100644 --- a/cloudy/db/psql.py +++ b/cloudy/db/psql.py @@ -1,52 +1,61 @@ +import datetime import os import re import sys -import datetime -import shlex from typing import Optional + from fabric import task -from cloudy.util.context import Context + from cloudy.sys import core +from cloudy.util.context import Context + @task @Context.wrap_context def db_psql_install_postgres_repo(c: Context) -> None: """Install the official PostgreSQL repository using modern gpg keyring approach.""" - + # Create the keyring directory if it doesn't exist - c.sudo('mkdir -p /etc/apt/keyrings') - + c.sudo("mkdir -p /etc/apt/keyrings") + # Download and install the PostgreSQL signing key to a dedicated keyring file # Force overwrite if file exists - c.sudo('wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | ' - 'gpg --dearmor --yes -o /etc/apt/keyrings/postgresql.gpg') - + c.sudo("wget --quiet -O /tmp/postgresql.asc https://www.postgresql.org/media/keys/ACCC4CF8.asc") + c.sudo("gpg --dearmor --yes -o /etc/apt/keyrings/postgresql.gpg /tmp/postgresql.asc") + c.sudo("rm -f /tmp/postgresql.asc") + # Set proper permissions for the keyring file - c.sudo('chmod 644 /etc/apt/keyrings/postgresql.gpg') - + c.sudo("chmod 644 /etc/apt/keyrings/postgresql.gpg") + # Add the PostgreSQL repository with the signed-by option pointing to the keyring - c.sudo('echo "deb [signed-by=/etc/apt/keyrings/postgresql.gpg] ' - 'https://apt.postgresql.org/pub/repos/apt/ $(lsb_release -cs)-pgdg main" > ' - '/etc/apt/sources.list.d/pgdg.list') - + c.sudo( + "sh -c 'echo \"deb [signed-by=/etc/apt/keyrings/postgresql.gpg] " + 'https://apt.postgresql.org/pub/repos/apt/ $(lsb_release -cs)-pgdg main" ' + "> /etc/apt/sources.list.d/pgdg.list'" + ) + # Update package lists - c.sudo('apt update') + c.sudo("apt update") + @task @Context.wrap_context def db_psql_latest_version(c: Context) -> str: """Get the latest available postgres version.""" db_psql_install_postgres_repo(c) - latest_version: str = '' - + latest_version: str = "" + # Search for all postgresql-client packages with version numbers - result = c.run('apt-cache search postgresql-client- | grep "postgresql-client-[0-9]"', hide=True, warn=True) - - # Updated regex to match the actual format: postgresql-client-15 - client libraries and client binaries - version_re = re.compile(r'postgresql-client-(\d+(?:\.\d+)?)\s') - + result = c.run( + 'apt-cache search postgresql-client- | grep "postgresql-client-[0-9]"', hide=True, warn=True + ) + + # Updated regex to match the actual format: + # postgresql-client-15 - client libraries and client binaries + version_re = re.compile(r"postgresql-client-(\d+(?:\.\d+)?)\s") + versions = [] - for line in result.stdout.split('\n'): + for line in result.stdout.split("\n"): if line.strip(): # Skip empty lines match = version_re.search(line) if match: @@ -57,279 +66,315 @@ def db_psql_latest_version(c: Context) -> str: except ValueError: # Handle cases where version might not be a simple number versions.append((0, version)) - + # Sort by numerical value (descending) and get the string version if versions: versions.sort(key=lambda x: x[0], reverse=True) latest_version = versions[0][1] - - print(f'Latest available postgresql is: [{latest_version}]', file=sys.stderr) + + print(f"Latest available postgresql is: [{latest_version}]", file=sys.stderr) return latest_version + @task @Context.wrap_context def db_psql_default_installed_version(c: Context) -> str: """Get the default installed postgres version.""" - default_version: str = '' - + default_version: str = "" + try: - result = c.run('psql --version', hide=True, warn=True) - + result = c.run("psql --version", hide=True, warn=True) + # Modern PostgreSQL version output: "psql (PostgreSQL) 15.4" # Legacy format: "psql (PostgreSQL) 9.6.24" - version_re = re.compile(r'psql\s+\(postgresql\)\s+(\d+(?:\.\d+)?)', re.IGNORECASE) - + version_re = re.compile(r"psql\s+\(postgresql\)\s+(\d+(?:\.\d+)?)", re.IGNORECASE) + match = version_re.search(result.stdout.strip()) if match: full_version = match.group(1) # For major versions >= 10, use just the major version (e.g., "15" not "15.4") # For versions < 10, use major.minor (e.g., "9.6" not "9.6.24") - version_parts = full_version.split('.') + version_parts = full_version.split(".") if len(version_parts) >= 1: major_version = int(version_parts[0]) if major_version >= 10: default_version = str(major_version) else: # For 9.x versions, include the minor version - default_version = f"{version_parts[0]}.{version_parts[1]}" if len(version_parts) > 1 else version_parts[0] - + default_version = ( + f"{version_parts[0]}.{version_parts[1]}" + if len(version_parts) > 1 + else version_parts[0] + ) + except Exception as e: - print(f'Error getting PostgreSQL version: {e}', file=sys.stderr) - - print(f'Default installed postgresql is: [{default_version}]', file=sys.stderr) + print(f"Error getting PostgreSQL version: {e}", file=sys.stderr) + + print(f"Default installed postgresql is: [{default_version}]", file=sys.stderr) return default_version + @task @Context.wrap_context -def db_psql_install(c: Context, version: str = '') -> None: +def db_psql_install(c: Context, version: str = "") -> None: """Install postgres of a given version or the latest version.""" db_psql_install_postgres_repo(c) - + if not version: version = db_psql_latest_version(c) - + if not version: raise ValueError("Could not determine PostgreSQL version to install") - - print(f'Installing PostgreSQL version: {version}', file=sys.stderr) - + + print(f"Installing PostgreSQL version: {version}", file=sys.stderr) + # Core PostgreSQL packages - these should always be available core_requirements = [ - f'postgresql-{version}', - f'postgresql-client-{version}', - f'postgresql-contrib-{version}', - 'postgresql-client-common' + f"postgresql-{version}", + f"postgresql-client-{version}", + f"postgresql-contrib-{version}", + "postgresql-client-common", ] - + # Optional development package - might not exist for all versions - dev_package = f'postgresql-server-dev-{version}' - + dev_package = f"postgresql-server-dev-{version}" + # Check if dev package exists before adding it - dev_check = c.run(f'apt-cache show {dev_package}', hide=True, warn=True) + dev_check = c.run(f"apt-cache show {dev_package}", hide=True, warn=True) if dev_check.ok: core_requirements.append(dev_package) else: - print(f'Warning: {dev_package} not available, skipping', file=sys.stderr) - - requirements = ' '.join(core_requirements) - + print(f"Warning: {dev_package} not available, skipping", file=sys.stderr) + + requirements = " ".join(core_requirements) + # Install with better error handling - result = c.sudo(f'apt -y install {requirements}', warn=True) - + result = c.sudo(f"apt -y install {requirements}", warn=True) + if not result.ok: - print(f'Installation failed. Trying alternative package names...', file=sys.stderr) + print("Installation failed. Trying alternative package names...", file=sys.stderr) # Fallback: try with different package naming for older versions fallback_requirements = [ - f'postgresql-{version}', - f'postgresql-client-{version}', - f'postgresql-contrib-{version}', - 'postgresql-client-common' + f"postgresql-{version}", + f"postgresql-client-{version}", + f"postgresql-contrib-{version}", + "postgresql-client-common", ] - fallback_cmd = ' '.join(fallback_requirements) - c.sudo(f'apt -y install {fallback_cmd}') - + fallback_cmd = " ".join(fallback_requirements) + c.sudo(f"apt -y install {fallback_cmd}") + # Verify installation verify_result = c.run(f'dpkg -l | grep "postgresql-{version}"', hide=True, warn=True) if verify_result.ok and verify_result.stdout.strip(): - print(f'PostgreSQL {version} installed successfully', file=sys.stderr) - core.sys_etc_git_commit(c, f'Installed postgres ({version})') + print(f"PostgreSQL {version} installed successfully", file=sys.stderr) + core.sys_etc_git_commit(c, f"Installed postgres ({version})") else: - raise RuntimeError(f'PostgreSQL {version} installation verification failed') + raise RuntimeError(f"PostgreSQL {version} installation verification failed") + @task @Context.wrap_context -def db_psql_client_install(c: Context, version: str = '') -> None: +def db_psql_client_install(c: Context, version: str = "") -> None: """Install postgres client of a given version or the latest version.""" db_psql_install_postgres_repo(c) # Add this line - + if not version: version = db_psql_latest_version(c) - + if not version: # Add validation raise ValueError("Could not determine PostgreSQL version") - + # Try with dev package first, fallback without it try: - requirements = f'postgresql-client-{version} postgresql-server-dev-{version} postgresql-client-common' - c.sudo(f'apt -y install {requirements}') - except: + requirements = ( + f"postgresql-client-{version} postgresql-server-dev-{version} postgresql-client-common" + ) + c.sudo(f"apt -y install {requirements}") + except Exception: # Fallback without dev package - requirements = f'postgresql-client-{version} postgresql-client-common' - c.sudo(f'apt -y install {requirements}') - - core.sys_etc_git_commit(c, f'Installed postgres client ({version})') + requirements = f"postgresql-client-{version} postgresql-client-common" + c.sudo(f"apt -y install {requirements}") + + core.sys_etc_git_commit(c, f"Installed postgres client ({version})") + @task @Context.wrap_context -def db_psql_make_data_dir(c: Context, version: str = '', data_dir: str = '/var/lib/postgresql') -> str: +def db_psql_make_data_dir( + c: Context, version: str = "", data_dir: str = "/var/lib/postgresql" +) -> str: """Make data directory for the postgres cluster.""" if not version: version = db_psql_latest_version(c) - + if not version: raise ValueError("Could not determine PostgreSQL version for data directory") - + # Create the version-specific data directory path - data_dir = os.path.abspath(os.path.join(data_dir, f'{version}')) - + data_dir = os.path.abspath(os.path.join(data_dir, f"{version}")) + # Create directory with proper permissions - c.sudo(f'mkdir -p {data_dir}') - + c.sudo(f"mkdir -p {data_dir}") + # Set proper ownership and permissions for PostgreSQL # PostgreSQL requires the data directory to be owned by postgres user # and have restrictive permissions (700) - c.sudo(f'chown postgres:postgres {data_dir}') - c.sudo(f'chmod 700 {data_dir}') - - print(f'Created PostgreSQL data directory: {data_dir}', file=sys.stderr) - + c.sudo(f"chown postgres:postgres {data_dir}") + c.sudo(f"chmod 700 {data_dir}") + + print(f"Created PostgreSQL data directory: {data_dir}", file=sys.stderr) + return data_dir + @task @Context.wrap_context def db_psql_remove_cluster(c: Context, version: str, cluster: str) -> None: """Remove a cluster if exists.""" # Check if cluster exists first - check_result = c.run(f'pg_lsclusters | grep -q "^{version}\\s\\+{cluster}\\s"', warn=True, hide=True) + check_result = c.run( + f'pg_lsclusters | grep -q "^{version}\\s\\+{cluster}\\s"', warn=True, hide=True + ) if check_result.failed: print(f"Cluster '{version}/{cluster}' does not exist") return - + # Protect against removing main system cluster without explicit confirmation - if cluster == 'main' and version in ['14', '15', '16', '17']: + if cluster == "main" and version in ["14", "15", "16", "17"]: print(f"Warning: Removing main cluster for PostgreSQL {version}") - + # Stop and remove the cluster - result = c.run(f'pg_dropcluster --stop {version} {cluster}', warn=True) - + result = c.sudo(f"pg_dropcluster --stop {version} {cluster}", warn=True) + if result.failed: print(f"Failed to remove cluster '{version}/{cluster}': {result.stderr}") return - + print(f"Successfully removed PostgreSQL cluster '{version}/{cluster}'") - core.sys_etc_git_commit(c, f'Removed postgres cluster ({version} {cluster})') + core.sys_etc_git_commit(c, f"Removed postgres cluster ({version} {cluster})") + @task @Context.wrap_context def db_psql_create_cluster( c: Context, - version: str = '', - cluster: str = 'main', - encoding: str = 'UTF-8', - data_dir: str = '/var/lib/postgresql' + version: str = "", + cluster: str = "main", + encoding: str = "UTF-8", + data_dir: str = "/var/lib/postgresql", ) -> None: """Make a new postgresql cluster.""" if not version: version = db_psql_default_installed_version(c) or db_psql_latest_version(c) db_psql_remove_cluster(c, version, cluster) data_dir = db_psql_make_data_dir(c, version, data_dir) - c.sudo(f'chown -R postgres {data_dir}') - c.sudo(f'pg_createcluster --start -e {encoding} {version} {cluster} -d {data_dir}') - core.sys_start_service(c, 'postgresql') - core.sys_etc_git_commit(c, f'Created new postgres cluster ({version} {cluster})') + c.sudo(f"chown -R postgres {data_dir}") + c.sudo(f"pg_createcluster --start -e {encoding} {version} {cluster} -d {data_dir}") + core.sys_start_service(c, "postgresql") + core.sys_etc_git_commit(c, f"Created new postgres cluster ({version} {cluster})") + @task @Context.wrap_context -def db_psql_set_permission( - c: Context, version: str = '', cluster: str = 'main' -) -> None: +def db_psql_set_permission(c: Context, version: str = "", cluster: str = "main") -> None: """Set default permission for postgresql.""" if not version: version = db_psql_default_installed_version(c) - cfgdir = os.path.join(os.path.dirname(__file__), '../cfg') - localcfg = os.path.expanduser(os.path.join(cfgdir, 'postgresql/pg_hba.conf')) - remotecfg = f'/etc/postgresql/{version}/{cluster}/pg_hba.conf' - c.sudo(f'rm -rf {remotecfg}') - c.put(localcfg, remotecfg) - c.sudo(f'chown postgres:postgres {remotecfg}') - c.sudo(f'chmod 644 {remotecfg}') - core.sys_start_service(c, 'postgresql') - core.sys_etc_git_commit(c, f'Set default postgres access for cluster ({version} {cluster})') + cfgdir = os.path.join(os.path.dirname(__file__), "../cfg") + localcfg = os.path.expanduser(os.path.join(cfgdir, "postgresql/pg_hba.conf")) + remotecfg = f"/etc/postgresql/{version}/{cluster}/pg_hba.conf" + c.sudo(f"rm -rf {remotecfg}") + c.put(localcfg, "/tmp/pg_hba.conf") + c.sudo(f"mv /tmp/pg_hba.conf {remotecfg}") + c.sudo(f"chown postgres:postgres {remotecfg}") + c.sudo(f"chmod 644 {remotecfg}") + core.sys_start_service(c, "postgresql") + core.sys_etc_git_commit(c, f"Set default postgres access for cluster ({version} {cluster})") + @task @Context.wrap_context def db_psql_configure( c: Context, - version: str = '', - cluster: str = 'main', - port: str = '5432', - interface: str = '*', - restart: bool = False + version: str = "", + cluster: str = "main", + port: str = "5432", + interface: str = "*", + restart: bool = False, ) -> None: """Configure postgres.""" if not version: version = db_psql_default_installed_version(c) - + # Find where postgresql.conf actually is - search_result = c.run(f'find /etc /usr/local /var -name "postgresql.conf" 2>/dev/null | head -1', warn=True, hide=True) - + search_result = c.run( + 'find /etc /usr/local /var -name "postgresql.conf" 2>/dev/null | head -1', + warn=True, + hide=True, + ) + if search_result.stdout.strip(): - postgresql_conf = search_result.stdout.strip().split('\n')[0] + postgresql_conf = search_result.stdout.strip().split("\n")[0] else: raise FileNotFoundError(f"PostgreSQL configuration file not found for version {version}") - - c.sudo(f"sed -i 's/#listen_addresses\\s*=\\s*'\"'\"'localhost'\"'\"'/listen_addresses = '\"'\"'{interface},127.0.0.1'\"'\"'/g' {postgresql_conf}") - core.sys_etc_git_commit(c, f'Configured postgres cluster ({version} {cluster})') + + sed_pattern = ( + f"s/#listen_addresses\\s*=\\s*'\"'\"'localhost'\"'\"'/" + f"listen_addresses = '\"'\"'{interface},127.0.0.1'\"'\"'/g" + ) + c.sudo(f"sed -i '{sed_pattern}' {postgresql_conf}") + core.sys_etc_git_commit(c, f"Configured postgres cluster ({version} {cluster})") if restart: - core.sys_start_service(c, 'postgresql') + core.sys_start_service(c, "postgresql") + @task @Context.wrap_context -def db_psql_dump_database(c: Context, dump_dir: str, db_name: str, dump_name: Optional[str] = None) -> None: +def db_psql_dump_database( + c: Context, dump_dir: str, db_name: str, dump_name: Optional[str] = None +) -> None: """Backup (dump) a database and save into a given directory.""" # Check if directory exists, create if not - result = c.run(f'test -d {dump_dir}', warn=True) + result = c.run(f"test -d {dump_dir}", warn=True) if result.failed: - c.sudo(f'mkdir -p {dump_dir}') - + c.sudo(f"mkdir -p {dump_dir}") + if not dump_name: now = datetime.datetime.now() - dump_name = f"{db_name}_{now.year:04d}_{now.month:02d}_{now.day:02d}_{now.hour:02d}_{now.minute:02d}_{now.second:02d}.psql.gz" - + dump_name = ( + f"{db_name}_{now.year:04d}_{now.month:02d}_{now.day:02d}_" + f"{now.hour:02d}_{now.minute:02d}_{now.second:02d}.psql.gz" + ) + dump_path = os.path.join(dump_dir, dump_name) - + # Find pg_dump executable - pg_dump = '/usr/bin/pg_dump' - result = c.run(f'test -x {pg_dump}', warn=True) + pg_dump = "/usr/bin/pg_dump" + result = c.run(f"test -x {pg_dump}", warn=True) if result.failed: - which_result = c.run('which pg_dump', warn=True, hide=True) + which_result = c.run("which pg_dump", warn=True, hide=True) if which_result.failed: raise FileNotFoundError("pg_dump command not found. Is PostgreSQL client installed?") pg_dump = which_result.stdout.strip() - + # Check if database exists - db_check = c.run(f'sudo -u postgres psql -lqt | cut -d \\| -f 1 | grep -qw {db_name}', warn=True) + db_check = c.run( + f"sudo -u postgres psql -lqt | cut -d \\| -f 1 | grep -qw {db_name}", warn=True + ) if db_check.failed: raise ValueError(f"Database '{db_name}' does not exist") - + # Perform the dump - c.sudo(f'sudo -u postgres {pg_dump} --no-owner --no-acl -h localhost {db_name} | gzip > {dump_path}') - + c.sudo( + f"sudo -u postgres {pg_dump} --no-owner --no-acl -h localhost {db_name} | " + f"gzip > {dump_path}" + ) + # Verify the dump was created and has content - verify_result = c.run(f'test -s {dump_path}', warn=True) + verify_result = c.run(f"test -s {dump_path}", warn=True) if verify_result.failed: raise RuntimeError(f"Database dump failed or resulted in empty file: {dump_path}") - + print(f"Database '{db_name}' successfully dumped to: {dump_path}") @@ -339,12 +384,17 @@ def db_psql_create_adminpack(c: Context) -> None: """Install admin pack.""" c.sudo('sudo -u postgres psql -c "CREATE EXTENSION IF NOT EXISTS adminpack;"') + @task @Context.wrap_context def db_psql_user_password(c: Context, username: str, password: str) -> None: """Change password for a postgres user.""" escaped_password = password.replace("'", "''") # Escape single quotes for SQL - c.sudo(f'sudo -u postgres psql -c "ALTER USER {username} WITH ENCRYPTED PASSWORD \'{escaped_password}\';"') + c.sudo( + f"sudo -u postgres psql -c " + f"\"ALTER USER {username} WITH ENCRYPTED PASSWORD '{escaped_password}';\"" + ) + @task @Context.wrap_context @@ -352,95 +402,140 @@ def db_psql_create_user(c: Context, username: str, password: str) -> None: """Create postgresql user.""" escaped_password = password.replace("'", "''") # Escape single quotes for SQL # Check if user already exists - check_result = c.run(f'sudo -u postgres psql -tAc "SELECT 1 FROM pg_roles WHERE rolname=\'{username}\';"', warn=True, hide=True) - if check_result.stdout.strip() == '1': + check_result = c.run( + f"sudo -u postgres psql -tAc \"SELECT 1 FROM pg_roles WHERE rolname='{username}';\"", + warn=True, + hide=True, + ) + if check_result.stdout.strip() == "1": print(f"User '{username}' already exists") return - - c.sudo(f'sudo -u postgres psql -c "CREATE ROLE {username} WITH NOSUPERUSER NOCREATEDB NOCREATEROLE LOGIN ENCRYPTED PASSWORD \'{escaped_password}\';"') + + c.sudo( + f"sudo -u postgres psql -c " + f'"CREATE ROLE {username} WITH NOSUPERUSER NOCREATEDB NOCREATEROLE ' + f"LOGIN ENCRYPTED PASSWORD '{escaped_password}';\"" + ) + @task @Context.wrap_context def db_psql_delete_user(c: Context, username: str) -> None: """Delete postgresql user.""" - if username == 'postgres': + if username == "postgres": print("Cannot drop user 'postgres'", file=sys.stderr) return - + # Check if user exists before trying to drop - check_result = c.run(f'sudo -u postgres psql -tAc "SELECT 1 FROM pg_roles WHERE rolname=\'{username}\';"', warn=True, hide=True) - if check_result.stdout.strip() != '1': + check_result = c.run( + f"sudo -u postgres psql -tAc \"SELECT 1 FROM pg_roles WHERE rolname='{username}';\"", + warn=True, + hide=True, + ) + if check_result.stdout.strip() != "1": print(f"User '{username}' does not exist") return - + c.sudo(f'sudo -u postgres psql -c "DROP ROLE {username};"') + @task @Context.wrap_context def db_psql_list_users(c: Context) -> None: """List postgresql users.""" c.sudo('sudo -u postgres psql -c "\\du"') + @task @Context.wrap_context def db_psql_list_databases(c: Context) -> None: """List postgresql databases.""" - c.sudo('sudo -u postgres psql -l') + c.sudo("sudo -u postgres psql -l") + @task @Context.wrap_context def db_psql_create_database(c: Context, dbname: str, dbowner: str) -> None: """Create a postgres database for an existing user.""" # Check if database already exists - check_result = c.run(f'sudo -u postgres psql -lqt | cut -d \\| -f 1 | grep -qw {dbname}', warn=True) + check_result = c.run( + f"sudo -u postgres psql -lqt | cut -d \\| -f 1 | grep -qw {dbname}", warn=True + ) if not check_result.failed: print(f"Database '{dbname}' already exists") return - + # Check if owner exists - owner_check = c.run(f'sudo -u postgres psql -tAc "SELECT 1 FROM pg_roles WHERE rolname=\'{dbowner}\';"', warn=True, hide=True) - if owner_check.stdout.strip() != '1': + owner_check = c.run( + f"sudo -u postgres psql -tAc \"SELECT 1 FROM pg_roles WHERE rolname='{dbowner}';\"", + warn=True, + hide=True, + ) + if owner_check.stdout.strip() != "1": raise ValueError(f"Database owner '{dbowner}' does not exist") - - c.sudo(f'sudo -u postgres createdb -E UTF8 -O {dbowner} {dbname}') + + c.sudo(f"sudo -u postgres createdb -E UTF8 -O {dbowner} {dbname}") + @task @Context.wrap_context def db_psql_add_gis_extension_to_database(c: Context, dbname: str) -> None: """Add gis extension to an existing database.""" - result = c.sudo(f'sudo -u postgres psql -d {dbname} -c "CREATE EXTENSION IF NOT EXISTS postgis;"', warn=True) + result = c.sudo( + f'sudo -u postgres psql -d {dbname} -c "CREATE EXTENSION IF NOT EXISTS postgis;"', warn=True + ) if result.failed: - print(f"Warning: Failed to add PostGIS extension to database '{dbname}'. Extension may not be available.") + print( + f"Warning: Failed to add PostGIS extension to database '{dbname}'. " + f"Extension may not be available." + ) + @task @Context.wrap_context def db_psql_add_gis_topology_extension_to_database(c: Context, dbname: str) -> None: """Add gis topology extension to an existing database.""" - result = c.sudo(f'sudo -u postgres psql -d {dbname} -c "CREATE EXTENSION IF NOT EXISTS postgis_topology;"', warn=True) + result = c.sudo( + f'sudo -u postgres psql -d {dbname} -c "CREATE EXTENSION IF NOT EXISTS postgis_topology;"', + warn=True, + ) if result.failed: - print(f"Warning: Failed to add PostGIS topology extension to database '{dbname}'. Extension may not be available.") + print( + f"Warning: Failed to add PostGIS topology extension to database '{dbname}'. " + f"Extension may not be available." + ) + @task @Context.wrap_context def db_psql_create_gis_database_from_template(c: Context, dbname: str, dbowner: str) -> None: """Create a postgres GIS database from template for an existing user.""" # Check if template exists - template_check = c.run('sudo -u postgres psql -lqt | cut -d \\| -f 1 | grep -qw template_postgis', warn=True) + template_check = c.run( + "sudo -u postgres psql -lqt | cut -d \\| -f 1 | grep -qw template_postgis", warn=True + ) if template_check.failed: raise ValueError("Template 'template_postgis' does not exist") - + # Check if database already exists - check_result = c.run(f'sudo -u postgres psql -lqt | cut -d \\| -f 1 | grep -qw {dbname}', warn=True) + check_result = c.run( + f"sudo -u postgres psql -lqt | cut -d \\| -f 1 | grep -qw {dbname}", warn=True + ) if not check_result.failed: print(f"Database '{dbname}' already exists") return - + # Check if owner exists - owner_check = c.run(f'sudo -u postgres psql -tAc "SELECT 1 FROM pg_roles WHERE rolname=\'{dbowner}\';"', warn=True, hide=True) - if owner_check.stdout.strip() != '1': + owner_check = c.run( + f"sudo -u postgres psql -tAc \"SELECT 1 FROM pg_roles WHERE rolname='{dbowner}';\"", + warn=True, + hide=True, + ) + if owner_check.stdout.strip() != "1": raise ValueError(f"Database owner '{dbowner}' does not exist") - - c.sudo(f'sudo -u postgres createdb -T template_postgis -O {dbowner} {dbname}') + + c.sudo(f"sudo -u postgres createdb -T template_postgis -O {dbowner} {dbname}") + @task @Context.wrap_context @@ -450,34 +545,42 @@ def db_psql_create_gis_database(c: Context, dbname: str, dbowner: str) -> None: db_psql_add_gis_extension_to_database(c, dbname) db_psql_add_gis_topology_extension_to_database(c, dbname) + @task @Context.wrap_context def db_psql_delete_database(c: Context, dbname: str) -> None: """Delete (drop) a database.""" - if dbname in ['postgres', 'template0', 'template1']: + if dbname in ["postgres", "template0", "template1"]: print(f"Cannot drop system database '{dbname}'", file=sys.stderr) return - + # Check if database exists - check_result = c.run(f'sudo -u postgres psql -lqt | cut -d \\| -f 1 | grep -qw {dbname}', warn=True) + check_result = c.run( + f"sudo -u postgres psql -lqt | cut -d \\| -f 1 | grep -qw {dbname}", warn=True + ) if check_result.failed: print(f"Database '{dbname}' does not exist") return - + c.sudo(f'sudo -u postgres psql -c "DROP DATABASE {dbname};"') + @task @Context.wrap_context def db_psql_grant_database_privileges(c: Context, dbname: str, dbuser: str) -> None: """Grant all privileges on database for an existing user.""" # Check if database exists - db_check = c.run(f'sudo -u postgres psql -lqt | cut -d \\| -f 1 | grep -qw {dbname}', warn=True) + db_check = c.run(f"sudo -u postgres psql -lqt | cut -d \\| -f 1 | grep -qw {dbname}", warn=True) if db_check.failed: raise ValueError(f"Database '{dbname}' does not exist") - + # Check if user exists - user_check = c.run(f'sudo -u postgres psql -tAc "SELECT 1 FROM pg_roles WHERE rolname=\'{dbuser}\';"', warn=True, hide=True) - if user_check.stdout.strip() != '1': + user_check = c.run( + f"sudo -u postgres psql -tAc \"SELECT 1 FROM pg_roles WHERE rolname='{dbuser}';\"", + warn=True, + hide=True, + ) + if user_check.stdout.strip() != "1": raise ValueError(f"User '{dbuser}' does not exist") - - c.sudo(f'sudo -u postgres psql -c "GRANT ALL PRIVILEGES ON DATABASE {dbname} to {dbuser};"') \ No newline at end of file + + c.sudo(f'sudo -u postgres psql -c "GRANT ALL PRIVILEGES ON DATABASE {dbname} to {dbuser};"') diff --git a/cloudy/srv/__init__.py b/cloudy/srv/__init__.py index f1d8eb2..c2c72d4 100644 --- a/cloudy/srv/__init__.py +++ b/cloudy/srv/__init__.py @@ -1,12 +1,12 @@ +import importlib import os import re import types -import importlib -PACKAGE = 'cloudy.srv' +PACKAGE = "cloudy.srv" MODULE_RE = r"^[^.].*\.py$" -PREFIX = ['srv_'] -SKIP = {'__init__.py'} +PREFIX = ["srv_"] +SKIP = {"__init__.py"} functions = [] module_dir = os.path.dirname(__file__) @@ -15,7 +15,7 @@ if fname in SKIP or not re.match(MODULE_RE, fname): continue mod_name = fname[:-3] - module = importlib.import_module(f'{PACKAGE}.{mod_name}') + module = importlib.import_module(f"{PACKAGE}.{mod_name}") for name in dir(module): if any(name.startswith(p) for p in PREFIX): item = getattr(module, name) diff --git a/cloudy/srv/recipe_cache_redis.py b/cloudy/srv/recipe_cache_redis.py index 8c99f3a..8c13d48 100644 --- a/cloudy/srv/recipe_cache_redis.py +++ b/cloudy/srv/recipe_cache_redis.py @@ -1,29 +1,36 @@ +"""Recipe for Redis cache server deployment.""" + from fabric import task -from cloudy.util.context import Context -from cloudy.sys import redis -from cloudy.util.conf import CloudyConfig -from cloudy.sys import firewall + from cloudy.srv import recipe_generic_server +from cloudy.sys import firewall, redis +from cloudy.util.conf import CloudyConfig +from cloudy.util.context import Context + @task @Context.wrap_context -def setup_redis(c: Context, cfg_file=None, generic: bool = True) -> None: +def setup_redis(c: Context, cfg_paths=None, generic: bool = True) -> None: """ - Setup redis server with config files - Ex: fab setup-redis --cfg-file="./.cloudy.generic,./.cloudy.admin" + Setup redis server with comprehensive configuration. + + Installs and configures Redis cache server with memory optimization, + network binding, custom port configuration, and firewall rules. + + Args: + cfg_paths: Comma-separated config file paths + generic: Whether to run generic server setup first + + Example: + fab recipe.redis-install --cfg-paths="./.cloudy.generic,./.cloudy.redis" """ - if cfg_file: - # Split comma-separated files and pass as list - cfg_files = [f.strip() for f in cfg_file.split(',')] - cfg = CloudyConfig(cfg_files) - else: - cfg = CloudyConfig() + cfg = CloudyConfig(cfg_paths) if generic: - recipe_generic_server.setup_server(c) + recipe_generic_server.setup_server(c, cfg_paths) - redis_address: str = cfg.get_variable('CACHESERVER', 'redis-address', '0.0.0.0') - redis_port: str = cfg.get_variable('CACHESERVER', 'redis-port', '6379') + redis_address: str = cfg.get_variable("CACHESERVER", "redis-address", "0.0.0.0") + redis_port: str = cfg.get_variable("CACHESERVER", "redis-port", "6379") # Install and configure redis redis.sys_redis_install(c) @@ -33,4 +40,16 @@ def setup_redis(c: Context, cfg_file=None, generic: bool = True) -> None: redis.sys_redis_configure_port(c, redis_port) # Allow incoming requests - firewall.fw_allow_incoming_port_proto(c, redis_port, 'tcp') + firewall.fw_allow_incoming_port_proto(c, redis_port, "tcp") + + # Success message + print("\n🎉 ✅ REDIS SERVER SETUP COMPLETED SUCCESSFULLY!") + print("📋 Configuration Summary:") + print(f" └── Redis Address: {redis_address}") + print(f" └── Redis Port: {redis_port}") + print(f" └── Firewall: Port {redis_port}/tcp allowed") + print(" └── Memory: Auto-configured (1/2 of system memory)") + print("\n🚀 Redis server is ready for use!") + if generic: + print(f" └── Admin SSH: Port {cfg.get_variable('common', 'ssh-port', '22')}") + print(f" └── Admin User: {cfg.get_variable('common', 'admin-user', 'admin')}") diff --git a/cloudy/srv/recipe_database_psql_gis.py b/cloudy/srv/recipe_database_psql_gis.py index ee5c380..56e63ba 100644 --- a/cloudy/srv/recipe_database_psql_gis.py +++ b/cloudy/srv/recipe_database_psql_gis.py @@ -1,62 +1,90 @@ +"""Recipe for PostgreSQL database server with PostGIS spatial extensions.""" + from fabric import task -from cloudy.util.context import Context -from cloudy.db import psql -from cloudy.db import pgis -from cloudy.sys import core -from cloudy.sys import firewall -from cloudy.sys import user -from cloudy.util.conf import CloudyConfig + +from cloudy.db import pgis, psql from cloudy.srv import recipe_generic_server +from cloudy.sys import core, firewall, user +from cloudy.util.conf import CloudyConfig +from cloudy.util.context import Context + @task @Context.wrap_context -def setup_db(c: Context, cfg_file=None, generic=True): +def setup_db(c: Context, cfg_paths=None, generic=True): """ - Setup db server with config files - Ex: fab setup-db --cfg-file="./.cloudy.generic,./.cloudy.admin" + Setup PostgreSQL database server with PostGIS spatial extensions. + + Installs and configures PostgreSQL with PostGIS for spatial database + operations, including cluster creation, user management, and firewall setup. + + Args: + cfg_paths: Comma-separated config file paths + generic: Whether to run generic server setup first + + Example: + fab recipe.psql-install --cfg-paths="./.cloudy.generic,./.cloudy.db" """ - if cfg_file: - # Split comma-separated files and pass as list - cfg_files = [f.strip() for f in cfg_file.split(',')] - cfg = CloudyConfig(cfg_files) - else: - cfg = CloudyConfig() + cfg = CloudyConfig(cfg_paths) if generic: - c = recipe_generic_server.setup_server(c) + c = recipe_generic_server.setup_server(c, cfg_paths) - dbaddress = cfg.get_variable('dbserver', 'listen-address') - if dbaddress and '*' not in dbaddress: - core.sys_add_hosts(c, 'db-host', dbaddress) + dbaddress = cfg.get_variable("dbserver", "listen-address") + if dbaddress and "*" not in dbaddress: + core.sys_add_hosts(c, "db-host", dbaddress) # postgresql: version, cluster, data_dir - pg_version = cfg.get_variable('dbserver', 'pg-version') - pg_listen_address = cfg.get_variable('dbserver', 'listen-address', '*') - pg_port = cfg.get_variable('dbserver', 'pg-port', '5432') - pg_cluster = cfg.get_variable('dbserver', 'pg-cluster', 'main') - pg_encoding = cfg.get_variable('dbserver', 'pg-encoding', 'UTF-8') - pg_data_dir = cfg.get_variable('dbserver', 'pg-data-dir', '/var/lib/postgresql') + pg_version = cfg.get_variable("dbserver", "pg-version") + pg_listen_address = cfg.get_variable("dbserver", "listen-address", "*") + pg_port = cfg.get_variable("dbserver", "pg-port", "5432") + pg_cluster = cfg.get_variable("dbserver", "pg-cluster", "main") + pg_encoding = cfg.get_variable("dbserver", "pg-encoding", "UTF-8") + pg_data_dir = cfg.get_variable("dbserver", "pg-data-dir", "/var/lib/postgresql") psql.db_psql_install(c, pg_version) psql.db_psql_make_data_dir(c, pg_version, pg_data_dir) psql.db_psql_remove_cluster(c, pg_version, pg_cluster) psql.db_psql_create_cluster(c, pg_version, pg_cluster, pg_encoding, pg_data_dir) psql.db_psql_set_permission(c, pg_version, pg_cluster) - psql.db_psql_configure(c, version=pg_version, port=pg_port, interface=pg_listen_address, restart=True) + psql.db_psql_configure( + c, version=pg_version, port=pg_port, interface=pg_listen_address, restart=True + ) firewall.fw_allow_incoming_port(c, pg_port) # change postgres' db user password - postgres_user_pass = cfg.get_variable('dbserver', 'postgres-pass') + postgres_user_pass = cfg.get_variable("dbserver", "postgres-pass") if postgres_user_pass: - psql.db_psql_user_password(c, 'postgres', postgres_user_pass) + psql.db_psql_user_password(c, "postgres", postgres_user_pass) # change postgres' system user password - postgres_sys_user_pass = cfg.get_variable('dbserver', 'postgres-sys-pass') + postgres_sys_user_pass = cfg.get_variable("dbserver", "postgres-sys-pass") if postgres_sys_user_pass: - user.sys_user_change_password(c, 'postgres', postgres_sys_user_pass) + user.sys_user_change_password(c, "postgres", postgres_sys_user_pass) # pgis version - pgis_version = cfg.get_variable('dbserver', 'pgis-version') + pgis_version = cfg.get_variable("dbserver", "pgis-version") pgis.db_pgis_install(c, pg_version, pgis_version) pgis.db_pgis_configure(c, pg_version, pgis_version) - pgis.db_pgis_get_database_gis_info(c, 'template_postgis') + pgis.db_pgis_get_database_gis_info(c, "template_postgis") + + # Success message + print("\n🎉 ✅ POSTGRESQL + POSTGIS DATABASE SERVER SETUP COMPLETED!") + print("📋 Configuration Summary:") + print(f" └── PostgreSQL Version: {pg_version}") + print(f" └── PostGIS Version: {pgis_version}") + print(f" └── Database Port: {pg_port}") + print(f" └── Listen Address: {pg_listen_address}") + print(f" └── Cluster: {pg_cluster}") + print(f" └── Data Directory: {pg_data_dir}") + print(f" └── Encoding: {pg_encoding}") + print(f" └── Firewall: Port {pg_port} allowed") + if postgres_user_pass: + print(" └── Postgres User: Password configured") + if postgres_sys_user_pass: + print(" └── System User: Password configured") + print("\n🚀 PostgreSQL with PostGIS is ready for spatial database operations!") + if generic: + admin_user = cfg.get_variable("common", "admin-user", "admin") + ssh_port = cfg.get_variable("common", "ssh-port", "22") + print(f" └── Admin SSH: {admin_user}@server:{ssh_port}") diff --git a/cloudy/srv/recipe_generic_server.py b/cloudy/srv/recipe_generic_server.py index 6c40c05..70db45b 100644 --- a/cloudy/srv/recipe_generic_server.py +++ b/cloudy/srv/recipe_generic_server.py @@ -1,107 +1,176 @@ +"""Recipe for generic server setup with comprehensive security configuration.""" + import os import uuid +from typing import Optional + from fabric import task -from cloudy.util.context import Context -from cloudy.sys import core, timezone, swap, postfix, vim, ssh, firewall, user + +from cloudy.sys import core, firewall, postfix, ssh, swap, timezone, user, vim from cloudy.util.conf import CloudyConfig +from cloudy.util.context import Context + @task @Context.wrap_context -def setup_server(c: Context, cfg_file=None) -> 'Context': +def setup_server(c: Context, cfg_paths: Optional[str] = None) -> Context: """ - Setup server with config files - Ex: fab setup-server --cfg-file="./.cloudy.generic,./.cloudy.admin" + Setup a generic server with comprehensive configuration. + + This recipe performs a complete server setup including: + - System initialization and updates + - Git configuration + - Hostname and network setup + - User creation (admin and automation users) + - SSH security configuration + - Firewall setup + - Timezone and locale configuration + - Swap configuration + - Essential package installation + + Args: + cfg_paths: Comma-separated list of config files to use + + Returns: + Updated Context object (may have new connection settings) + + Example: + fab recipe.gen-install --cfg-paths="./.cloudy.generic,./.cloudy.admin" """ - if cfg_file: - # Split comma-separated files and pass as list - cfg_files = [f.strip() for f in cfg_file.split(',')] - cfg = CloudyConfig(cfg_files) - else: - cfg = CloudyConfig() - - # git info + # Initialize configuration + cfg = CloudyConfig(cfg_paths) + + # Read all configuration values upfront + git_user_full_name = cfg.get_variable("common", "git-user-full-name") + git_user_email = cfg.get_variable("common", "git-user-email") + hostname = cfg.get_variable("common", "hostname") + timezone_val = cfg.get_variable("common", "timezone", "America/New_York") + locale_val = cfg.get_variable("common", "locale", "en_US.UTF-8") + swap_size = cfg.get_variable("common", "swap-size") + + # User configuration + admin_user = cfg.get_variable("common", "admin-user") + admin_pass = cfg.get_variable("common", "admin-pass") + admin_groups = cfg.get_variable("common", "admin-groups", "admin,www-data") + + auto_user = cfg.get_variable("auto", "auto-user") + auto_pass = cfg.get_variable("auto", "auto-pass", uuid.uuid4().hex) + auto_groups = cfg.get_variable("auto", "auto-groups", "admin,www-data") + + # SSH and security configuration + ssh_port = cfg.get_variable("common", "ssh-port", "22") + disable_root = cfg.get_boolean_config("common", "ssh-disable-root") + enable_password = cfg.get_boolean_config("common", "ssh-enable-password") + pub_key = cfg.get_variable("common", "ssh-key-path") + + # Validate configuration values + user.validate_user_config(admin_user, admin_pass) + ssh.validate_ssh_config(ssh_port) + + # === SYSTEM INITIALIZATION === core.sys_init(c) core.sys_update(c) - git_user_full_name = cfg.get_variable('common', 'git-user-full-name') - git_user_email = cfg.get_variable('common', 'git-user-email') + # Configure git if credentials provided if git_user_full_name and git_user_email: - core.sys_git_configure(c, 'root', git_user_full_name, git_user_email) + core.sys_git_configure(c, "root", git_user_full_name, git_user_email) - hostname = cfg.get_variable('common', 'hostname') + # Configure hostname if provided if hostname: core.sys_hostname_configure(c, hostname) - core.sys_add_hosts(c, hostname, '127.0.0.1') + core.sys_add_hosts(c, hostname, "127.0.0.1") + # Install essential packages and configure system core.sys_set_ipv4_precedence(c) core.sys_install_common(c) timezone.sys_time_install_common(c) postfix.sys_install_postfix(c) vim.sys_set_default_editor(c) - # timezone and locale - tz = cfg.get_variable('common', 'timezone', 'America/New_York') - timezone.sys_configure_timezone(c, tz) - locale_val = cfg.get_variable('common', 'locale', 'en_US.UTF-8') + # Configure timezone and locale + timezone.sys_configure_timezone(c, timezone_val) core.sys_locale_configure(c, locale_val) - # swap - swap_size = cfg.get_variable('common', 'swap-size') + # Configure swap if specified if swap_size: swap.sys_swap_configure(c, swap_size) - # primary users & passwords - admin_user = cfg.get_variable('common', 'admin-user') - admin_pass = cfg.get_variable('common', 'admin-pass') - admin_groups = cfg.get_variable('common', 'admin-groups', 'admin,www-data') - if admin_user and admin_pass: - user.sys_user_add(c, admin_user) - user.sys_user_change_password(c, admin_user, admin_pass) - user.sys_user_add_sudoer(c, admin_user) - user.sys_user_set_group_umask(c, admin_user) - user.sys_user_create_groups(c, admin_groups) - user.sys_user_add_to_groups(c, admin_user, admin_groups) - shared_key_dir = cfg.get_variable('common', 'shared-key-path') - if shared_key_dir: - ssh.sys_ssh_push_server_shared_keys(c, admin_user, shared_key_dir) - - # automation users & passwords - auto_user = cfg.get_variable('common', 'auto-user') - auto_pass = cfg.get_variable('common', 'auto-pass', uuid.uuid4().hex) - auto_groups = cfg.get_variable('common', 'auto-groups', 'admin,www-data') - if auto_user and auto_pass: - user.sys_user_add(c, auto_user) - user.sys_user_change_password(c, auto_user, auto_pass) - user.sys_user_add_sudoer(c, auto_user) - user.sys_user_set_group_umask(c, auto_user) - user.sys_user_create_groups(c, auto_groups) - user.sys_user_add_to_groups(c, auto_user, auto_groups) - shared_key_dir = cfg.get_variable('common', 'shared-key-path') - if shared_key_dir: - ssh.sys_ssh_push_server_shared_keys(c, auto_user, shared_key_dir) - - # ssh stuff + # === USER CREATION === + # Create admin user with full setup + admin_shared_key_dir = cfg.get_variable("common", "shared-key-path") + user.sys_user_create_with_setup(c, admin_user, admin_pass, admin_groups, admin_shared_key_dir) + + # Create automation user with full setup + auto_shared_key_dir = cfg.get_variable("auto", "shared-key-path") + user.sys_user_create_with_setup(c, auto_user, auto_pass, auto_groups, auto_shared_key_dir) + + # === SSH & SECURITY CONFIGURATION === + # Install and configure firewall firewall.fw_install(c) - ssh_port = cfg.get_variable('common', 'ssh-port', '22') - if ssh_port: + + # Configure SSH port and secure server + if ssh_port != "22": ssh.sys_ssh_set_port(c, ssh_port) c = c.reconnect(new_port=ssh_port) - firewall.fw_secure_server(c, ssh_port) - c = c.reconnect(new_port=ssh_port) - disable_root = cfg.get_variable('common', 'ssh-disable-root') - if auto_user and disable_root and disable_root.upper() == 'YES': - ssh.sys_ssh_disable_root_login(c) - c = c.reconnect(new_port=ssh_port, new_user=auto_user) + firewall.fw_secure_server(c, ssh_port) + c = c.reconnect(new_port=ssh_port) - enable_password = cfg.get_variable('common', 'ssh-enable-password') - if enable_password and enable_password.upper() == 'YES': + # Enable password authentication if requested (before disabling root) + if enable_password: ssh.sys_ssh_enable_password_authentication(c) - pub_key = cfg.get_variable('common', 'ssh-key-path') - if pub_key: - pub_key = os.path.expanduser(pub_key) - if os.path.exists(pub_key) and admin_user: - ssh.sys_ssh_push_public_key(c, admin_user, pub_key) + # Install public key for admin user BEFORE disabling root login + if pub_key and admin_user: + pub_key_path = os.path.expanduser(pub_key) + if os.path.exists(pub_key_path): + ssh.sys_ssh_push_public_key(c, admin_user, pub_key_path) + + # Disable root login if configured and admin user exists with SSH key + if admin_user and disable_root and pub_key: + ssh.sys_ssh_disable_root_login(c) + c = c.reconnect(new_port=ssh_port, new_user=admin_user) + + # Verify the new admin user connection and sudo access + c.run("uname -a", echo=True) + c.run("id", echo=True) + + # Test sudo access by providing the password + admin_pass = cfg.get_variable("common", "admin-pass") + if admin_pass: + result = c.run(f"echo '{admin_pass}' | sudo -S whoami", echo=True, warn=True) + if result.return_code == 0: + print( + f"✅ Successfully connected as {admin_user} with SSH key authentication " + "and sudo access" + ) + else: + print(f"⚠️ Connected as {admin_user} with SSH keys, but sudo test failed") + else: + print( + f"✅ Successfully connected as {admin_user} with SSH key authentication " + "(sudo not tested - no password available)" + ) + + # Success message for generic server setup + print("\n🎉 ✅ GENERIC SERVER SETUP COMPLETED SUCCESSFULLY!") + print("📋 Configuration Summary:") + print(f" ├── Hostname: {hostname or 'Not configured'}") + print(f" ├── Timezone: {timezone_val}") + print(f" ├── Locale: {locale_val}") + if swap_size: + print(f" ├── Swap: {swap_size}") + print(f" ├── Admin User: {admin_user} (groups: {admin_groups})") + print(f" ├── Auto User: {auto_user} (groups: {auto_groups})") + print(f" ├── SSH Port: {ssh_port}") + print(f" ├── Root Login: {'Disabled' if disable_root else 'Enabled'}") + print(f" ├── Password Auth: {'Enabled' if enable_password else 'Disabled'}") + print(f" ├── SSH Keys: {'Configured' if pub_key else 'Not configured'}") + print(" └── Firewall: UFW enabled and configured") + print("\n🚀 Generic server foundation is ready for specialized deployments!") + if admin_user and disable_root: + print(f" └── SSH Access: {admin_user}@server:{ssh_port} (key-based authentication)") + else: + print(f" └── SSH Access: root@server:{ssh_port}") - return c \ No newline at end of file + return c diff --git a/cloudy/srv/recipe_loadbalancer_nginx.py b/cloudy/srv/recipe_loadbalancer_nginx.py index 8899f48..dfb7650 100644 --- a/cloudy/srv/recipe_loadbalancer_nginx.py +++ b/cloudy/srv/recipe_loadbalancer_nginx.py @@ -1,41 +1,70 @@ +"""Recipe for Nginx load balancer deployment with SSL support.""" + from fabric import task -from cloudy.util.context import Context + +from cloudy.srv import recipe_generic_server from cloudy.sys import firewall -from cloudy.web import nginx from cloudy.util.conf import CloudyConfig -from cloudy.srv import recipe_generic_server +from cloudy.util.context import Context +from cloudy.web import nginx + @task @Context.wrap_context -def setup_lb(c: Context, cfg_file=None, generic=True): +def setup_lb(c: Context, cfg_paths=None, generic=True): """ - Setup lb server with config files - Ex: fab setup-lb --cfg-file="./.cloudy.generic,./.cloudy.admin" + Setup Nginx load balancer with SSL support. + + Installs and configures Nginx as a reverse proxy load balancer with + HTTP/HTTPS support, SSL certificate management, and upstream server + configuration for high-availability web applications. + + Args: + cfg_paths: Comma-separated config file paths + generic: Whether to run generic server setup first + + Example: + fab recipe.lb-install --cfg-paths="./.cloudy.generic,./.cloudy.lb" """ - if cfg_file: - # Split comma-separated files and pass as list - cfg_files = [f.strip() for f in cfg_file.split(',')] - cfg = CloudyConfig(cfg_files) - else: - cfg = CloudyConfig() + cfg = CloudyConfig(cfg_paths) if generic: - c = recipe_generic_server.setup_server(c) + c = recipe_generic_server.setup_server(c, cfg_paths) firewall.fw_allow_incoming_http(c) firewall.fw_allow_incoming_https(c) # install nginx nginx.web_nginx_install(c) - protocol = 'http' - domain_name = cfg.get_variable('webserver', 'domain-name', 'example.com') - certificate_path = cfg.get_variable('common', 'certificate-path') + protocol = "http" + domain_name = cfg.get_variable("webserver", "domain-name", "example.com") + certificate_path = cfg.get_variable("common", "certificate-path") if certificate_path: nginx.web_nginx_copy_ssl(c, domain_name, certificate_path) - protocol = 'https' + protocol = "https" - binding_address = cfg.get_variable('webserver', 'binding-address', '*') - upstream_address = cfg.get_variable('webserver', 'upstream-address') - upstream_port = cfg.get_variable('webserver', 'upstream-port', '8181') + binding_address = cfg.get_variable("webserver", "binding-address", "*") + upstream_address = cfg.get_variable("webserver", "upstream-address") + upstream_port = cfg.get_variable("webserver", "upstream-port", "8181") if upstream_address and upstream_port: - nginx.web_nginx_setup_domain(c, domain_name, protocol, binding_address, upstream_address, upstream_port) + nginx.web_nginx_setup_domain( + c, domain_name, protocol, binding_address, upstream_address, upstream_port + ) + + # Success message + print("\n🎉 ✅ NGINX LOAD BALANCER SETUP COMPLETED SUCCESSFULLY!") + print("📋 Configuration Summary:") + print(f" └── Domain: {domain_name}") + print(f" └── Protocol: {protocol.upper()}") + print(f" └── Binding Address: {binding_address}") + if upstream_address and upstream_port: + print(f" └── Upstream: {upstream_address}:{upstream_port}") + if certificate_path: + print(" └── SSL Certificate: Configured") + print(" └── Firewall: HTTP (80) and HTTPS (443) allowed") + print("\n🚀 Nginx load balancer is ready to serve traffic!") + if generic: + admin_user = cfg.get_variable("common", "admin-user", "admin") + ssh_port = cfg.get_variable("common", "ssh-port", "22") + print(f" └── Admin SSH: {admin_user}@server:{ssh_port}") + print(f"\n🌍 Access your site at: {protocol}://{domain_name}") diff --git a/cloudy/srv/recipe_standalone_server.py b/cloudy/srv/recipe_standalone_server.py index 7cef173..a573cf7 100644 --- a/cloudy/srv/recipe_standalone_server.py +++ b/cloudy/srv/recipe_standalone_server.py @@ -1,102 +1,103 @@ +"""Recipe for complete standalone server with all services integrated.""" + from fabric import task -from cloudy.util.context import Context -from cloudy.util.conf import CloudyConfig -from cloudy.sys import core -from cloudy.sys import firewall -from cloudy.sys import user -from cloudy.db import psql -from cloudy.db import pgis -from cloudy.db import pgpool -from cloudy.web import apache -from cloudy.web import supervisor -from cloudy.web import nginx -from cloudy.web import geoip -from cloudy.web import www -from cloudy.sys import python + +from cloudy.db import pgis, pgpool, psql from cloudy.srv import recipe_generic_server +from cloudy.sys import core, firewall, python, user +from cloudy.util.conf import CloudyConfig +from cloudy.util.context import Context +from cloudy.web import apache, geoip, nginx, supervisor, www + @task @Context.wrap_context -def setup_standalone(c: Context, cfg_file=None) -> None: +def setup_standalone(c: Context, cfg_paths=None) -> None: """ - Setup standalone server with config files - Ex: fab setup-standalone --cfg-file="./.cloudy.generic,./.cloudy.admin" + Setup complete standalone server with all services integrated. + + Deploys a comprehensive all-in-one server combining generic server setup, + PostgreSQL database with PostGIS, Django web server, and Nginx load balancer. + Perfect for single-server deployments requiring full stack functionality. + + Args: + cfg_paths: Comma-separated config file paths + + Example: + fab recipe.sta-install --cfg-paths="./.cloudy.generic,./.cloudy.standalone" """ - if cfg_file: - # Split comma-separated files and pass as list - cfg_files = [f.strip() for f in cfg_file.split(',')] - cfg = CloudyConfig(cfg_files) - else: - cfg = CloudyConfig() + cfg = CloudyConfig(cfg_paths) # ====== Generic Server ========= - c = recipe_generic_server.setup_server(c) + c = recipe_generic_server.setup_server(c, cfg_paths) # ====== Database Server ========= - dbaddress = cfg.get_variable('dbserver', 'listen-address') - if dbaddress and '*' not in dbaddress: - core.sys_add_hosts(c, 'db-host', dbaddress) + dbaddress = cfg.get_variable("dbserver", "listen-address") + if dbaddress and "*" not in dbaddress: + core.sys_add_hosts(c, "db-host", dbaddress) - pg_version = cfg.get_variable('dbserver', 'pg-version') - pg_listen_address = cfg.get_variable('dbserver', 'listen-address', '*') - pg_port = cfg.get_variable('dbserver', 'pg-port', '5432') - pg_cluster = cfg.get_variable('dbserver', 'pg-cluster', 'main') - pg_encoding = cfg.get_variable('dbserver', 'pg-encoding', 'UTF-8') - pg_data_dir = cfg.get_variable('dbserver', 'pg-data-dir', '/var/lib/postgresql') + pg_version = cfg.get_variable("dbserver", "pg-version") + pg_listen_address = cfg.get_variable("dbserver", "listen-address", "*") + pg_port = cfg.get_variable("dbserver", "pg-port", "5432") + pg_cluster = cfg.get_variable("dbserver", "pg-cluster", "main") + pg_encoding = cfg.get_variable("dbserver", "pg-encoding", "UTF-8") + pg_data_dir = cfg.get_variable("dbserver", "pg-data-dir", "/var/lib/postgresql") psql.db_psql_install(c, pg_version) psql.db_psql_make_data_dir(c, pg_version, pg_data_dir) psql.db_psql_remove_cluster(c, pg_version, pg_cluster) psql.db_psql_create_cluster(c, pg_version, pg_cluster, pg_encoding, pg_data_dir) psql.db_psql_set_permission(c, pg_version, pg_cluster) - psql.db_psql_configure(c, version=pg_version, port=pg_port, interface=pg_listen_address, restart=True) + psql.db_psql_configure( + c, version=pg_version, port=pg_port, interface=pg_listen_address, restart=True + ) # change postgres' db user password - postgres_user_pass = cfg.get_variable('dbserver', 'postgres-pass') + postgres_user_pass = cfg.get_variable("dbserver", "postgres-pass") if postgres_user_pass: - psql.db_psql_user_password(c, 'postgres', postgres_user_pass) + psql.db_psql_user_password(c, "postgres", postgres_user_pass) # change postgres' system user password - postgres_sys_user_pass = cfg.get_variable('dbserver', 'postgres-sys-pass') + postgres_sys_user_pass = cfg.get_variable("dbserver", "postgres-sys-pass") if postgres_sys_user_pass: - user.sys_user_change_password(c, 'postgres', postgres_sys_user_pass) + user.sys_user_change_password(c, "postgres", postgres_sys_user_pass) # pgis version - pgis_version = cfg.get_variable('dbserver', 'pgis-version') + pgis_version = cfg.get_variable("dbserver", "pgis-version") pgis.db_pgis_install(c, pg_version, pgis_version) pgis.db_pgis_configure(c, pg_version, pgis_version) - pgis.db_pgis_get_database_gis_info(c, 'template_postgis') + pgis.db_pgis_get_database_gis_info(c, "template_postgis") pgpool.db_pgpool2_install(c) - db_host = cfg.get_variable('dbserver', 'db-host') + db_host = cfg.get_variable("dbserver", "db-host") if db_host: - db_port = cfg.get_variable('dbserver', 'db-port', '5432') + db_port = cfg.get_variable("dbserver", "db-port", "5432") pgpool.db_pgpool2_configure(c, dbhost=db_host, dbport=db_port) - db_listen_address = cfg.get_variable('dbserver', 'listen-address') + db_listen_address = cfg.get_variable("dbserver", "listen-address") if db_listen_address: core.sys_add_hosts(c, db_host, db_listen_address) # ====== Web Server ========= - py_version = cfg.get_variable('common', 'python-version') + py_version = cfg.get_variable("common", "python-version") python.sys_python_install_common(c, py_version) - webserver = cfg.get_variable('webserver', 'webserver') - if webserver and webserver.lower() == 'apache': + webserver = cfg.get_variable("webserver", "webserver") + if webserver and webserver.lower() == "apache": apache.web_apache2_install(c) apache.web_apache2_install_mods(c) - elif webserver and webserver.lower() == 'gunicorn': + elif webserver and webserver.lower() == "gunicorn": supervisor.web_supervisor_install(c) www.web_create_data_directory(c) # hostname, cache server - cache_host = cfg.get_variable('cacheserver', 'cache-host') - cache_listen_address = cfg.get_variable('cacheserver', 'listen-address') + cache_host = cfg.get_variable("cacheserver", "cache-host") + cache_listen_address = cfg.get_variable("cacheserver", "listen-address") if cache_host and cache_listen_address: core.sys_add_hosts(c, cache_host, cache_listen_address) # geoIP - geo_ip = cfg.get_variable('webserver', 'geo-ip') + geo_ip = cfg.get_variable("webserver", "geo-ip") if geo_ip: geoip.web_geoip_install_requirements(c) geoip.web_geoip_install_maxmind_api(c) @@ -108,16 +109,51 @@ def setup_standalone(c: Context, cfg_file=None) -> None: firewall.fw_allow_incoming_https(c) nginx.web_nginx_install(c) - protocol = 'http' - domain_name = cfg.get_variable('webserver', 'domain-name', 'example.com') - certificate_path = cfg.get_variable('common', 'certificate-path') + protocol = "http" + domain_name = cfg.get_variable("webserver", "domain-name", "example.com") + certificate_path = cfg.get_variable("common", "certificate-path") if certificate_path: nginx.web_nginx_copy_ssl(c, domain_name, certificate_path) - protocol = 'https' + protocol = "https" - binding_address = cfg.get_variable('webserver', 'binding-address', '*') - upstream_address = cfg.get_variable('webserver', 'upstream-address') - upstream_port = cfg.get_variable('webserver', 'upstream-port', '8181') + binding_address = cfg.get_variable("webserver", "binding-address", "*") + upstream_address = cfg.get_variable("webserver", "upstream-address") + upstream_port = cfg.get_variable("webserver", "upstream-port", "8181") if upstream_address and upstream_port: - nginx.web_nginx_setup_domain(c, domain_name, protocol, binding_address, upstream_address, upstream_port) - + nginx.web_nginx_setup_domain( + c, domain_name, protocol, binding_address, upstream_address, upstream_port + ) + + # Success message + print("\n🎉 ✅ STANDALONE SERVER SETUP COMPLETED SUCCESSFULLY!") + print("📋 Complete All-in-One Configuration Summary:") + print("\n📊 DATABASE SERVER:") + print(f" └── PostgreSQL: {pg_version} with PostGIS {pgis_version}") + print(f" └── Database Port: {pg_port}") + print(f" └── Listen Address: {pg_listen_address}") + print(f" └── Data Directory: {pg_data_dir}") + print("\n🌍 WEB SERVER:") + print(f" └── Python Version: {py_version or 'System default'}") + print(f" └── Web Server: {webserver or 'Not specified'}") + print(" └── Web Directory: /var/www") + if geo_ip: + print(" └── GeoIP: MaxMind databases installed") + print("\n🔄 LOAD BALANCER:") + print(" └── Nginx: Configured as reverse proxy") + print(f" └── Domain: {domain_name}") + print(f" └── Protocol: {protocol.upper()}") + if upstream_address and upstream_port: + print(f" └── Upstream: {upstream_address}:{upstream_port}") + if certificate_path: + print(" └── SSL Certificate: Configured") + print("\n🔥 ADDITIONAL FEATURES:") + if cache_host: + print(f" └── Cache Server: {cache_host}") + if db_host: + print(" └── PgPool: Connection pooling configured") + print(" └── Firewall: HTTP/HTTPS traffic allowed") + print("\n🚀 Standalone server is fully operational with database, web, and load balancing!") + admin_user = cfg.get_variable("common", "admin-user", "admin") + ssh_port = cfg.get_variable("common", "ssh-port", "22") + print(f" └── Admin SSH: {admin_user}@server:{ssh_port}") + print(f"\n🌍 Access your application at: {protocol}://{domain_name}") diff --git a/cloudy/srv/recipe_vpn_server.py b/cloudy/srv/recipe_vpn_server.py index 0b1a574..116d39c 100644 --- a/cloudy/srv/recipe_vpn_server.py +++ b/cloudy/srv/recipe_vpn_server.py @@ -1,48 +1,54 @@ +"""Recipe for OpenVPN server deployment with Docker containerization.""" + from fabric import task -from cloudy.util.context import Context -from cloudy.sys import docker -from cloudy.sys import openvpn -from cloudy.sys import firewall -from cloudy.util.conf import CloudyConfig + from cloudy.srv import recipe_generic_server -from cloudy.sys import core +from cloudy.sys import core, docker, firewall, openvpn +from cloudy.util.conf import CloudyConfig +from cloudy.util.context import Context + @task @Context.wrap_context -def setup_openvpn(c: Context, cfg_file=None, generic=True): +def setup_openvpn(c: Context, cfg_paths=None, generic=True): """ - Setup vpn server with config files - Ex: fab setup-openvpn --cfg-file="./.cloudy.generic,./.cloudy.admin" + Setup OpenVPN server with Docker containerization. + + Installs Docker and deploys OpenVPN server in containers with dual-protocol + support (UDP and TCP), certificate management, and firewall configuration + for secure VPN access. + + Args: + cfg_paths: Comma-separated config file paths + generic: Whether to run generic server setup first + + Example: + fab recipe.vpn-install --cfg-paths="./.cloudy.generic,./.cloudy.vpn" """ - if cfg_file: - # Split comma-separated files and pass as list - cfg_files = [f.strip() for f in cfg_file.split(',')] - cfg = CloudyConfig(cfg_files) - else: - cfg = CloudyConfig() + cfg = CloudyConfig(cfg_paths) if generic: - c = recipe_generic_server.setup_server(c) + c = recipe_generic_server.setup_server(c, cfg_paths) # Install and configure Docker for OpenVPN - admin_user = cfg.get_variable('common', 'admin-user') + admin_user = cfg.get_variable("common", "admin-user") docker.sys_docker_install(c) docker.sys_docker_config(c) docker.sys_docker_user_group(c, admin_user) - domain = cfg.get_variable('VPNSERVER', 'vpn-domain') + domain = cfg.get_variable("VPNSERVER", "vpn-domain") if not domain: print("domain is missing from VPNSERVER section") return - passphrase = cfg.get_variable('VPNSERVER', 'passphrase', 'nopass') - repository = cfg.get_variable('VPNSERVER', 'repo', 'kylemanna/openvpn') - datadir = cfg.get_variable('VPNSERVER', 'data-dir', '/docker/openvpn') + passphrase = cfg.get_variable("VPNSERVER", "passphrase", "nopass") + repository = cfg.get_variable("VPNSERVER", "repo", "kylemanna/openvpn") + datadir = cfg.get_variable("VPNSERVER", "data-dir", "/docker/openvpn") core.sys_mkdir(c, datadir) # Primary OpenVPN instance - primary_port = cfg.get_variable('VPNSERVER', 'primary-port', '80') - primary_proto = cfg.get_variable('VPNSERVER', 'primary-proto', 'udp') + primary_port = cfg.get_variable("VPNSERVER", "primary-port", "80") + primary_proto = cfg.get_variable("VPNSERVER", "primary-proto", "udp") if primary_port and primary_proto: openvpn.sys_openvpn_docker_install( c, @@ -51,14 +57,14 @@ def setup_openvpn(c: Context, cfg_file=None, generic=True): proto=primary_proto, passphrase=passphrase, datadir=datadir, - repo=repository + repo=repository, ) openvpn.sys_openvpn_docker_conf(c, domain, primary_port, primary_proto) firewall.fw_allow_incoming_port_proto(c, primary_port, primary_proto) # Secondary OpenVPN instance - secondary_port = cfg.get_variable('VPNSERVER', 'secondary-port', '443') - secondary_proto = cfg.get_variable('VPNSERVER', 'secondary-proto', 'tcp') + secondary_port = cfg.get_variable("VPNSERVER", "secondary-port", "443") + secondary_proto = cfg.get_variable("VPNSERVER", "secondary-proto", "tcp") if secondary_port and secondary_proto: openvpn.sys_openvpn_docker_install( c, @@ -67,7 +73,26 @@ def setup_openvpn(c: Context, cfg_file=None, generic=True): proto=secondary_proto, passphrase=passphrase, datadir=datadir, - repo=repository + repo=repository, ) openvpn.sys_openvpn_docker_conf(c, domain, secondary_port, secondary_proto) firewall.fw_allow_incoming_port_proto(c, secondary_port, secondary_proto) + + # Success message + print("\n🎉 ✅ OPENVPN SERVER SETUP COMPLETED SUCCESSFULLY!") + print("📋 Configuration Summary:") + print(f" └── Domain: {domain}") + print(f" └── Data Directory: {datadir}") + print(f" └── Docker Repository: {repository}") + print(f" └── Admin User: {admin_user} (added to docker group)") + if primary_port and primary_proto: + print(f" └── Primary VPN: {primary_port}/{primary_proto.upper()}") + if secondary_port and secondary_proto: + print(f" └── Secondary VPN: {secondary_port}/{secondary_proto.upper()}") + print(f" └── Passphrase: {'Configured' if passphrase != 'nopass' else 'Default (nopass)'}") + print("\n🚀 OpenVPN server is ready! Generate client certificates to connect.") + if generic: + admin_user = cfg.get_variable("common", "admin-user", "admin") + ssh_port = cfg.get_variable("common", "ssh-port", "22") + print(f" └── Admin SSH: {admin_user}@server:{ssh_port}") + print("\n📝 Next steps: Use OpenVPN container commands to generate client configs") diff --git a/cloudy/srv/recipe_webserver_django.py b/cloudy/srv/recipe_webserver_django.py index 2164385..63ac4a9 100644 --- a/cloudy/srv/recipe_webserver_django.py +++ b/cloudy/srv/recipe_webserver_django.py @@ -1,84 +1,108 @@ +"""Recipe for Django web server deployment with database integration.""" + from fabric import task -from cloudy.util.context import Context -from cloudy.sys import core -from cloudy.sys import python -from cloudy.web import apache -from cloudy.web import supervisor -from cloudy.web import geoip -from cloudy.sys import firewall -from cloudy.db import pgpool -from cloudy.web import www -from cloudy.db import psql -from cloudy.db import pgis -from cloudy.util.conf import CloudyConfig + +from cloudy.db import pgis, pgpool, psql from cloudy.srv import recipe_generic_server +from cloudy.sys import core, firewall, python +from cloudy.util.conf import CloudyConfig +from cloudy.util.context import Context +from cloudy.web import apache, geoip, supervisor, www + @task @Context.wrap_context -def setup_web(c: Context, cfg_file=None, generic=True): +def setup_web(c: Context, cfg_paths=None, generic=True): """ - Setup web server with config files - Ex: fab setup-web --cfg-file="./.cloudy.generic,./.cloudy.admin" + Setup Django web server with comprehensive configuration. + + Installs and configures web server (Apache/Gunicorn), Python environment, + PostgreSQL with PostGIS, PgPool connection pooling, GeoIP databases, + and sets up web directories for Django applications. + + Args: + cfg_paths: Comma-separated config file paths + generic: Whether to run generic server setup first + + Example: + fab recipe.web-install --cfg-paths="./.cloudy.generic,./.cloudy.web" """ - if cfg_file: - # Split comma-separated files and pass as list - cfg_files = [f.strip() for f in cfg_file.split(',')] - cfg = CloudyConfig(cfg_files) - else: - cfg = CloudyConfig() + cfg = CloudyConfig(cfg_paths) if generic: - recipe_generic_server.setup_server(c) + recipe_generic_server.setup_server(c, cfg_paths) # hostname, ips - hostname = cfg.get_variable('common', 'hostname') + hostname = cfg.get_variable("common", "hostname") if hostname: core.sys_hostname_configure(c, hostname) - core.sys_add_hosts(c, hostname, '127.0.0.1') + core.sys_add_hosts(c, hostname, "127.0.0.1") # setup python stuff - py_version = cfg.get_variable('common', 'python-version') + py_version = cfg.get_variable("common", "python-version") python.sys_python_install_common(c, py_version) # install webserver - webserver = cfg.get_variable('webserver', 'webserver') - if webserver and webserver.lower() == 'apache': + webserver = cfg.get_variable("webserver", "webserver") + if webserver and webserver.lower() == "apache": apache.web_apache2_install(c) apache.web_apache2_install_mods(c) - elif webserver and webserver.lower() == 'gunicorn': + elif webserver and webserver.lower() == "gunicorn": supervisor.web_supervisor_install(c) # create web directory www.web_create_data_directory(c) - webserver_port = cfg.get_variable('webserver', 'webserver-port') + webserver_port = cfg.get_variable("webserver", "webserver-port") if webserver_port: firewall.fw_allow_incoming_port(c, webserver_port) # hostname, cache server - cache_host = cfg.get_variable('cacheserver', 'cache-host') - cache_listen_address = cfg.get_variable('cacheserver', 'listen-address') + cache_host = cfg.get_variable("cacheserver", "cache-host") + cache_listen_address = cfg.get_variable("cacheserver", "listen-address") if cache_host and cache_listen_address: core.sys_add_hosts(c, cache_host, cache_listen_address) # create db related - pg_version = cfg.get_variable('dbserver', 'pg-version') + pg_version = cfg.get_variable("dbserver", "pg-version") psql.db_psql_install(c, pg_version) - pgis_version = cfg.get_variable('dbserver', 'pgis-version') + pgis_version = cfg.get_variable("dbserver", "pgis-version") pgis.db_pgis_install(c, pg_version, pgis_version) pgpool.db_pgpool2_install(c) - db_host = cfg.get_variable('dbserver', 'db-host') + db_host = cfg.get_variable("dbserver", "db-host") if db_host: - db_port = cfg.get_variable('dbserver', 'db-port', '5432') + db_port = cfg.get_variable("dbserver", "db-port", "5432") pgpool.db_pgpool2_configure(c, dbhost=db_host, dbport=db_port) - db_listen_address = cfg.get_variable('dbserver', 'listen-address') + db_listen_address = cfg.get_variable("dbserver", "listen-address") if db_listen_address: core.sys_add_hosts(c, db_host, db_listen_address) - geo_ip = cfg.get_variable('webserver', 'geo-ip') + geo_ip = cfg.get_variable("webserver", "geo-ip") if geo_ip: geoip.web_geoip_install_requirements(c) geoip.web_geoip_install_maxmind_api(c) geoip.web_geoip_install_maxmind_country(c) geoip.web_geoip_install_maxmind_city(c) + + # Success message + print("\n🎉 ✅ DJANGO WEB SERVER SETUP COMPLETED SUCCESSFULLY!") + print("📋 Configuration Summary:") + print(f" └── Hostname: {hostname or 'Not configured'}") + print(f" └── Python Version: {py_version or 'System default'}") + print(f" └── Web Server: {webserver or 'Not specified'}") + if webserver_port: + print(f" └── Web Port: {webserver_port} (firewall allowed)") + print(f" └── PostgreSQL: {pg_version} with PostGIS {pgis_version}") + if cache_host: + print(f" └── Cache Server: {cache_host}:{cache_listen_address}") + if db_host: + print(f" └── Database: {db_host}:{db_port} via PgPool") + if geo_ip: + print(" └── GeoIP: MaxMind databases installed") + print(" └── Web Directory: /var/www") + print("\n🚀 Django web server is ready for application deployment!") + if generic: + admin_user = cfg.get_variable("common", "admin-user", "admin") + ssh_port = cfg.get_variable("common", "ssh-port", "22") + print(f" └── Admin SSH: {admin_user}@{hostname or 'server'}:{ssh_port}") diff --git a/cloudy/sys/core.py b/cloudy/sys/core.py index add3a4b..617077b 100644 --- a/cloudy/sys/core.py +++ b/cloudy/sys/core.py @@ -1,168 +1,204 @@ import os import time from typing import Optional + from fabric import task -from cloudy.util.context import Context + from cloudy.sys.etc import sys_etc_git_commit +from cloudy.util.context import Context + @task @Context.wrap_context def sys_log_error(c: Context, msg: str, exc: Exception) -> None: print(f"{msg}: {exc}") + @task @Context.wrap_context def sys_start_service(c: Context, service: str) -> None: """Start a systemd service.""" - c.sudo(f'systemctl start {service}') + c.sudo(f"systemctl start {service}") + @task @Context.wrap_context def sys_stop_service(c: Context, service: str) -> None: """Stop a systemd service.""" - c.sudo(f'systemctl stop {service}') + c.sudo(f"systemctl stop {service}") + @task @Context.wrap_context def sys_reload_service(c: Context, service: str) -> None: """Reload a systemd service.""" - c.sudo(f'systemctl reload {service}') + c.sudo(f"systemctl reload {service}") + @task @Context.wrap_context def sys_restart_service(c: Context, service: str) -> None: """Restart a systemd service safely.""" - c.sudo(f'systemctl stop {service}', warn=True) + c.sudo(f"systemctl stop {service}", warn=True) time.sleep(2) - c.sudo(f'systemctl start {service}') + c.sudo(f"systemctl start {service}") time.sleep(2) + @task @Context.wrap_context def sys_init(c: Context) -> None: """Remove needrestart package if present (to avoid unnecessary restarts).""" - c.sudo('apt remove -y needrestart', warn=False) - c.sudo('apt autoremove -y', warn=False) + c.sudo("apt remove -y needrestart", warn=False) + c.sudo("apt autoremove -y", warn=False) + @task @Context.wrap_context def sys_update(c: Context) -> None: """Update package repositories.""" - c.sudo('apt -y update') - c.sudo('apt list --upgradable', warn=True) - sys_etc_git_commit(c, 'Updated package repositories') + c.sudo("apt -y update") + c.sudo("apt list --upgradable", warn=True) + sys_etc_git_commit(c, "Updated package repositories") + @task @Context.wrap_context def sys_upgrade(c: Context) -> None: """Perform a full system upgrade and reboot.""" - c.sudo('apt install -y aptitude') - c.sudo('apt update') - c.sudo('DEBIAN_FRONTEND=noninteractive aptitude -y upgrade') - sys_etc_git_commit(c, 'Upgraded the system') - c.sudo('shutdown -r now') + c.sudo("apt install -y aptitude") + c.sudo("apt update") + c.sudo("DEBIAN_FRONTEND=noninteractive aptitude -y upgrade") + sys_etc_git_commit(c, "Upgraded the system") + c.sudo("shutdown -r now") + @task @Context.wrap_context def sys_safe_upgrade(c: Context) -> None: """Perform a safe system upgrade and reboot.""" - c.sudo('apt install -y aptitude') - c.sudo('apt upgrade -y') - c.sudo('DEBIAN_FRONTEND=noninteractive aptitude -y safe-upgrade') - sys_etc_git_commit(c, 'Upgraded the system safely') - c.sudo('shutdown -r now') + c.sudo("apt install -y aptitude") + c.sudo("apt upgrade -y") + c.sudo("DEBIAN_FRONTEND=noninteractive aptitude -y safe-upgrade") + sys_etc_git_commit(c, "Upgraded the system safely") + c.sudo("shutdown -r now") + @task @Context.wrap_context def sys_git_install(c: Context) -> None: """Install the latest version of git.""" - c.sudo('apt update') - c.sudo('apt -y install git') + c.sudo("apt update") + c.sudo("apt -y install git") + @task @Context.wrap_context def sys_install_common(c: Context) -> None: """Install a set of common system utilities.""" requirements = [ - 'build-essential', 'gcc', 'subversion', 'mercurial', 'wget', 'vim', 'less', 'sudo', - 'redis-tools', 'curl', 'apt-transport-https', 'ca-certificates', - 'software-properties-common', 'net-tools', 'ntpsec' + "build-essential", + "gcc", + "subversion", + "mercurial", + "wget", + "vim", + "less", + "sudo", + "redis-tools", + "curl", + "apt-transport-https", + "ca-certificates", + "software-properties-common", + "net-tools", + "ntpsec", ] c.sudo(f'apt -y install {" ".join(requirements)}') + @task @Context.wrap_context def sys_git_configure(c: Context, user: str, name: str, email: str) -> None: """Configure git for a given user.""" - c.sudo('apt install -y git-core') + c.sudo("apt install -y git-core") c.sudo(f'sudo -u {user} git config --global user.name "{name}"', warn=True) c.sudo(f'sudo -u {user} git config --global user.email "{email}"', warn=True) - sys_etc_git_commit(c, f'Configured git for user: {user}') + sys_etc_git_commit(c, f"Configured git for user: {user}") + @task @Context.wrap_context def sys_add_hosts(c: Context, host: str, ip: str) -> None: """Add or update an entry in /etc/hosts.""" - host_file = '/etc/hosts' + host_file = "/etc/hosts" c.sudo(f"sed -i '/\\s*{host}\\s*.*/d' {host_file}") c.sudo(f"sed -i '1i{ip}\t{host}' {host_file}") - sys_etc_git_commit(c, f'Added host:{host}, ip:{ip} to: {host_file}') + sys_etc_git_commit(c, f"Added host:{host}, ip:{ip} to: {host_file}") + @task @Context.wrap_context def sys_hostname_configure(c: Context, hostname: str) -> None: """Configure the system hostname.""" - c.sudo(f'echo "{hostname}" > /etc/hostname') - c.sudo('hostname -F /etc/hostname') - sys_etc_git_commit(c, f'Configured hostname to: {hostname}') + c.sudo(f"sh -c 'echo {hostname} > /etc/hostname'") + c.sudo("hostname -F /etc/hostname") + sys_etc_git_commit(c, f"Configured hostname to: {hostname}") + @task @Context.wrap_context -def sys_locale_configure(c: Context, locale: str = 'en_US.UTF-8') -> None: +def sys_locale_configure(c: Context, locale: str = "en_US.UTF-8") -> None: """Configure the system locale.""" - c.sudo('DEBIAN_FRONTEND=noninteractive dpkg-reconfigure locales') - c.sudo(f'update-locale LANG={locale}') + c.sudo("DEBIAN_FRONTEND=noninteractive dpkg-reconfigure locales") + c.sudo(f"update-locale LANG={locale}") + @task @Context.wrap_context def sys_uname(c: Context) -> None: """Display remote system information.""" - c.run('uname -a') + c.run("uname -a") + @task @Context.wrap_context def sys_show_process_by_memory_usage(c: Context) -> None: """List processes by memory usage.""" - c.run('ps -eo pmem,pcpu,rss,vsize,args | sort -k 1 -r') + c.run("ps -eo pmem,pcpu,rss,vsize,args | sort -k 1 -r") + @task @Context.wrap_context def sys_show_disk_io(c: Context) -> None: """List disk I/O statistics.""" - c.run('iostat -d -x 2 5') + c.run("iostat -d -x 2 5") + @task @Context.wrap_context def sys_shutdown(c: Context, restart: bool = True) -> None: """Shutdown or restart the host.""" - c.sudo('shutdown -r now' if restart else 'shutdown now') + c.sudo("shutdown -r now" if restart else "shutdown now") + @task @Context.wrap_context def sys_add_default_startup(c: Context, program: str) -> None: """Enable a program to start at system boot.""" - c.sudo(f'systemctl enable {program}') + c.sudo(f"systemctl enable {program}") + @task @Context.wrap_context def sys_remove_default_startup(c: Context, program: str) -> None: """Disable a program from starting at system boot.""" - c.sudo(f'systemctl stop {program}', warn=True) - c.sudo(f'systemctl disable {program}') + c.sudo(f"systemctl stop {program}", warn=True) + c.sudo(f"systemctl disable {program}") + @task @Context.wrap_context -def sys_mkdir(c: Context, path: str = '', owner: str = '', group: str = '') -> Optional[str]: +def sys_mkdir(c: Context, path: str = "", owner: str = "", group: str = "") -> Optional[str]: """Create a directory and optionally set owner/group.""" if not path: return None @@ -177,50 +213,50 @@ def sys_mkdir(c: Context, path: str = '', owner: str = '', group: str = '') -> O sys_log_error(c, f"Failed to create directory {path}", e) return None + @task @Context.wrap_context def sys_hold_package(c: Context, package: str) -> None: """Prevent a package from being updated (hold the version).""" try: - c.sudo(f'apt-mark hold {package}') + c.sudo(f"apt-mark hold {package}") except Exception as e: sys_log_error(c, f"Failed to hold package {package}", e) + @task @Context.wrap_context def sys_unhold_package(c: Context, package: str) -> None: """Remove a package from being held at a version.""" try: - c.sudo(f'apt-mark unhold {package}') + c.sudo(f"apt-mark unhold {package}") except Exception as e: sys_log_error(c, f"Failed to unhold package {package}", e) + @task @Context.wrap_context def sys_set_ipv4_precedence(c: Context) -> None: """Set IPv4 to take precedence for sites that prefer it.""" - get_address_info_config = '/etc/gai.conf' + get_address_info_config = "/etc/gai.conf" # Use POSIX character class [[:space:]] instead of \s, and use # delimiter in sed. - pattern_before = r'^[ \t]*#[ \t]*precedence[ \t]*::ffff:0:0/96[ \t]*100' - pattern_after = 'precedence ::ffff:0:0/96 100' + pattern_before = r"^[ \t]*#[ \t]*precedence[ \t]*::ffff:0:0/96[ \t]*100" + pattern_after = "precedence ::ffff:0:0/96 100" try: # Use | delimiter in sed to avoid conflicts with # in the pattern - sed_command = ( - f"sed -i \"s|{pattern_before}|{pattern_after}|\" {get_address_info_config}" - ) + sed_command = f'sed -i "s|{pattern_before}|{pattern_after}|" {get_address_info_config}' c.sudo(sed_command) except Exception as e: sys_log_error(c, "Failed to set IPv4 precedence", e) + @task @Context.wrap_context def run_command(c: Context, cmd: str, use_sudo: bool = False) -> Optional[str]: """Run a shell command, optionally with sudo, and handle errors.""" try: result = c.sudo(cmd) if use_sudo else c.run(cmd) - return result.stdout if hasattr(result, 'stdout') else str(result) + return result.stdout if hasattr(result, "stdout") else str(result) except Exception as e: sys_log_error(c, f"Command failed: {cmd}", e) return None - - diff --git a/cloudy/sys/docker.py b/cloudy/sys/docker.py index 03bb97e..c7aa849 100644 --- a/cloudy/sys/docker.py +++ b/cloudy/sys/docker.py @@ -1,34 +1,38 @@ import os + from fabric import task -from cloudy.util.context import Context -from cloudy.sys.etc import sys_etc_git_commit + from cloudy.sys.core import sys_mkdir, sys_restart_service +from cloudy.sys.etc import sys_etc_git_commit +from cloudy.util.context import Context + @task @Context.wrap_context def sys_docker_install(c: Context) -> None: """Install Docker CE on Ubuntu.""" url = "https://download.docker.com/linux/ubuntu" - c.sudo(f'curl -fsSL {url}/gpg | apt-key add -') + c.sudo(f"sh -c 'curl -fsSL {url}/gpg | apt-key add -'") c.sudo(f'add-apt-repository "deb [arch=amd64] {url} $(lsb_release -cs) stable"') - c.sudo('apt update') - c.sudo('apt -y install docker-ce') - c.sudo('systemctl enable docker') - sys_etc_git_commit(c, 'Installed docker (ce)') + c.sudo("apt update") + c.sudo("apt -y install docker-ce") + c.sudo("systemctl enable docker") + sys_etc_git_commit(c, "Installed docker (ce)") @task @Context.wrap_context def sys_docker_config(c: Context) -> None: """Configure Docker daemon and create /docker directory.""" - cfgdir = os.path.join(os.path.dirname(__file__), '../cfg') - localcfg = os.path.expanduser(os.path.join(cfgdir, 'docker/daemon.json')) - remotecfg = '/etc/docker/daemon.json' - c.sudo(f'rm -rf {remotecfg}') - c.put(localcfg, remotecfg) - sys_mkdir(c, '/docker') - sys_etc_git_commit(c, 'Configured docker') - sys_restart_service(c, 'docker') + cfgdir = os.path.join(os.path.dirname(__file__), "../cfg") + localcfg = os.path.expanduser(os.path.join(cfgdir, "docker/daemon.json")) + remotecfg = "/etc/docker/daemon.json" + c.sudo(f"rm -rf {remotecfg}") + c.put(localcfg, "/tmp/daemon.json") + c.sudo(f"mv /tmp/daemon.json {remotecfg}") + sys_mkdir(c, "/docker") + sys_etc_git_commit(c, "Configured docker") + sys_restart_service(c, "docker") @task @@ -36,5 +40,5 @@ def sys_docker_config(c: Context) -> None: def sys_docker_user_group(c: Context, username: str) -> None: """Add a user to the docker group.""" # Try to create the group, ignore error if it exists - c.sudo('groupadd docker', warn=True) - c.sudo(f'usermod -aG docker {username}') + c.sudo("groupadd docker", warn=True) + c.sudo(f"usermod -aG docker {username}") diff --git a/cloudy/sys/etc.py b/cloudy/sys/etc.py index e6726df..e0d714f 100644 --- a/cloudy/sys/etc.py +++ b/cloudy/sys/etc.py @@ -1,27 +1,32 @@ import sys + from fabric import task + from cloudy.util.context import Context + @task @Context.wrap_context def is_git_installed(c: Context) -> bool: """Check if git is installed on the host.""" - result = c.run('which git', hide=True, warn=True) + result = c.run("which git", hide=True, warn=True) return bool(result.stdout.strip()) + @task @Context.wrap_context def sys_etc_git_init(c: Context) -> None: """Initialize git tracking in /etc if not already present.""" if not is_git_installed(c): return - result = c.run('test -d /etc/.git', warn=True) + result = c.run("test -d /etc/.git", warn=True) if result.failed: - with c.cd('/etc'): - c.sudo('git init') - c.sudo('git add .') + with c.cd("/etc"): + c.sudo("git init") + c.sudo("git add .") c.sudo('git commit -a -m "Initial Submission"') + @task @Context.wrap_context def sys_etc_git_commit(c: Context, msg: str, print_only: bool = True) -> None: @@ -34,12 +39,9 @@ def sys_etc_git_commit(c: Context, msg: str, print_only: bool = True) -> None: return sys_etc_git_init(c) - with c.cd('/etc'): + with c.cd("/etc"): try: - c.sudo('git add .') + c.sudo("git add .") c.sudo(f'git commit -a -m "{msg}"', warn=True, hide=True) except Exception as e: print(f"Git commit failed: {e}", file=sys.stderr) - - - diff --git a/cloudy/sys/firewall.py b/cloudy/sys/firewall.py index a23ae04..ead2986 100644 --- a/cloudy/sys/firewall.py +++ b/cloudy/sys/firewall.py @@ -1,139 +1,157 @@ from fabric import task -from cloudy.util.context import Context + from cloudy.sys.etc import sys_etc_git_commit +from cloudy.util.context import Context + @task @Context.wrap_context def fw_reload_ufw(c: Context) -> None: """Helper to reload and show UFW status.""" - c.sudo('ufw disable; echo "y" | ufw enable; sudo ufw status verbose') + c.sudo("sh -c 'ufw disable; echo \"y\" | ufw enable; ufw status verbose'") + @task @Context.wrap_context def fw_install(c: Context) -> None: """Install UFW firewall.""" # Disable UFW first (ignore errors if not installed/enabled) - c.sudo('ufw --force disable', warn=True) - + c.sudo("ufw --force disable", warn=True) + # Remove UFW completely - c.sudo('apt remove --purge -y ufw') - + c.sudo("apt remove --purge -y ufw") + # Clean up any remaining configuration files - c.sudo('apt autoremove -y') - + c.sudo("apt autoremove -y") + # Install UFW fresh - c.sudo('apt update') - c.sudo('apt -y install ufw') - - sys_etc_git_commit(c, 'Installed firewall (ufw)') - + c.sudo("apt update") + c.sudo("apt -y install ufw") + + sys_etc_git_commit(c, "Installed firewall (ufw)") + + @task @Context.wrap_context -def fw_secure_server(c: Context, ssh_port: str = '22') -> None: +def fw_secure_server(c: Context, ssh_port: str = "22") -> None: """Secure the server: deny all incoming, allow outgoing, allow SSH.""" - c.sudo('ufw logging on') - c.sudo('ufw default deny incoming') - c.sudo('ufw default allow outgoing') - c.sudo(f'ufw allow {ssh_port}') + c.sudo("ufw logging on") + c.sudo("ufw default deny incoming") + c.sudo("ufw default allow outgoing") + c.sudo(f"ufw allow {ssh_port}") fw_reload_ufw(c) - sys_etc_git_commit(c, 'Server is secured down') + sys_etc_git_commit(c, "Server is secured down") + @task @Context.wrap_context def fw_wide_open(c: Context) -> None: """Open up firewall: allow all incoming and outgoing.""" - c.sudo('ufw default allow incoming') - c.sudo('ufw default allow outgoing') + c.sudo("ufw default allow incoming") + c.sudo("ufw default allow outgoing") fw_reload_ufw(c) + @task @Context.wrap_context def fw_disable(c: Context) -> None: """Disable firewall.""" - c.sudo('ufw disable; sudo ufw status verbose') + c.sudo("ufw disable; sudo ufw status verbose") + @task @Context.wrap_context def fw_allow_incoming_http(c: Context) -> None: """Allow HTTP (port 80) requests.""" - c.sudo('ufw allow http') + c.sudo("ufw allow http") fw_reload_ufw(c) + @task @Context.wrap_context def fw_disallow_incoming_http(c: Context) -> None: """Disallow HTTP (port 80) requests.""" - c.sudo('ufw delete allow http') + c.sudo("ufw delete allow http") fw_reload_ufw(c) + @task @Context.wrap_context def fw_allow_incoming_https(c: Context) -> None: """Allow HTTPS (port 443) requests.""" - c.sudo('ufw allow https') + c.sudo("ufw allow https") fw_reload_ufw(c) + @task @Context.wrap_context def fw_disallow_incoming_https(c: Context) -> None: """Disallow HTTPS (port 443) requests.""" - c.sudo('ufw delete allow https') + c.sudo("ufw delete allow https") fw_reload_ufw(c) + @task @Context.wrap_context def fw_allow_incoming_postgresql(c: Context) -> None: """Allow PostgreSQL (port 5432) requests.""" - c.sudo('ufw allow postgresql') + c.sudo("ufw allow postgresql") fw_reload_ufw(c) + @task @Context.wrap_context def fw_disallow_incoming_postgresql(c: Context) -> None: """Disallow PostgreSQL (port 5432) requests.""" - c.sudo('ufw delete allow postgresql') + c.sudo("ufw delete allow postgresql") fw_reload_ufw(c) + @task @Context.wrap_context def fw_allow_incoming_port(c: Context, port: str) -> None: """Allow requests on a specific port.""" - c.sudo(f'ufw allow {port}') + c.sudo(f"ufw allow {port}") fw_reload_ufw(c) + @task @Context.wrap_context def fw_disallow_incoming_port(c: Context, port: int) -> None: """Disallow requests on a specific port.""" - c.sudo(f'ufw delete allow {port}') - c.sudo(f'ufw delete allow {port}/tcp', warn=True) - c.sudo(f'ufw delete allow {port}/udp', warn=True) + c.sudo(f"ufw delete allow {port}") + c.sudo(f"ufw delete allow {port}/tcp", warn=True) + c.sudo(f"ufw delete allow {port}/udp", warn=True) fw_reload_ufw(c) + @task @Context.wrap_context def fw_allow_incoming_port_proto(c: Context, port: str, proto: str) -> None: """Allow requests on a specific port/protocol.""" - c.sudo(f'ufw allow {port}/{proto}') + c.sudo(f"ufw allow {port}/{proto}") fw_reload_ufw(c) + @task @Context.wrap_context def fw_disallow_incoming_port_proto(c: Context, port: int, proto: str) -> None: """Disallow requests on a specific port/protocol.""" - c.sudo(f'ufw delete allow {port}/{proto}') + c.sudo(f"ufw delete allow {port}/{proto}") fw_reload_ufw(c) + @task @Context.wrap_context def fw_allow_incoming_host_port(c: Context, host: str, port: int) -> None: """Allow requests from a specific host on a specific port.""" - c.sudo(f'ufw allow from {host} to any port {port}') + c.sudo(f"ufw allow from {host} to any port {port}") fw_reload_ufw(c) + @task @Context.wrap_context def fw_disallow_incoming_host_port(c: Context, host: str, port: int) -> None: """Disallow requests from a specific host on a specific port.""" - c.sudo(f'ufw delete allow from {host} to any port {port}') + c.sudo(f"ufw delete allow from {host} to any port {port}") fw_reload_ufw(c) diff --git a/cloudy/sys/memcached.py b/cloudy/sys/memcached.py index e4f0d37..c56e8e9 100644 --- a/cloudy/sys/memcached.py +++ b/cloudy/sys/memcached.py @@ -1,63 +1,72 @@ import os + from fabric import task -from cloudy.util.context import Context + from cloudy.sys.core import sys_restart_service from cloudy.sys.etc import sys_etc_git_commit +from cloudy.util.context import Context + @task @Context.wrap_context def sys_memcached_install(c: Context) -> None: """Install memcached and restart the service.""" - c.sudo('apt -y install memcached') - sys_etc_git_commit(c, 'Installed memcached') - sys_restart_service(c, 'memcached') + c.sudo("apt -y install memcached") + sys_etc_git_commit(c, "Installed memcached") + sys_restart_service(c, "memcached") + @task @Context.wrap_context def sys_memcached_libdev_install(c: Context) -> None: """Install libmemcached-dev required by pylibmc.""" - c.sudo('apt -y install libmemcached-dev') + c.sudo("apt -y install libmemcached-dev") + @task @Context.wrap_context def sys_memcached_configure_memory(c: Context, memory: int = 0, divider: int = 8) -> None: """Configure memcached memory. If memory is 0, use total system memory divided by 'divider'.""" - memcached_conf = '/etc/memcached.conf' + memcached_conf = "/etc/memcached.conf" if not memory: result = c.run("free -m | awk '/^Mem:/{print $2}'", hide=True) total_mem = int(result.stdout.strip()) memory = total_mem // divider c.sudo(f'sed -i "s/-m\\s\\+[0-9]\\+/-m {memory}/g" {memcached_conf}') - sys_etc_git_commit(c, f'Configured memcached (memory={memory})') - sys_restart_service(c, 'memcached') + sys_etc_git_commit(c, f"Configured memcached (memory={memory})") + sys_restart_service(c, "memcached") + @task @Context.wrap_context def sys_memcached_configure_port(c: Context, port: int = 11211) -> None: """Configure memcached port.""" - memcached_conf = '/etc/memcached.conf' + memcached_conf = "/etc/memcached.conf" c.sudo(f'sed -i "s/-p\\s\\+[0-9]\\+/-p {port}/g" {memcached_conf}') - sys_etc_git_commit(c, f'Configured memcached (port={port})') - sys_restart_service(c, 'memcached') + sys_etc_git_commit(c, f"Configured memcached (port={port})") + sys_restart_service(c, "memcached") + @task @Context.wrap_context -def sys_memcached_configure_interface(c: Context, interface: str = '0.0.0.0') -> None: +def sys_memcached_configure_interface(c: Context, interface: str = "0.0.0.0") -> None: """Configure memcached interface.""" - memcached_conf = '/etc/memcached.conf' + memcached_conf = "/etc/memcached.conf" c.sudo(f'sed -i "s/-l\\s\\+[0-9.]\\+/-l {interface}/g" {memcached_conf}') - sys_etc_git_commit(c, f'Configured memcached (interface={interface})') - sys_restart_service(c, 'memcached') + sys_etc_git_commit(c, f"Configured memcached (interface={interface})") + sys_restart_service(c, "memcached") + @task @Context.wrap_context def sys_memcached_config(c: Context) -> None: """Replace memcached.conf with local config and reconfigure memory.""" - cfgdir = os.path.join(os.path.dirname(__file__), '../cfg') - localcfg = os.path.expanduser(os.path.join(cfgdir, 'memcached/memcached.conf')) - remotecfg = '/etc/memcached.conf' - c.sudo(f'rm -rf {remotecfg}') - c.put(localcfg, remotecfg) + cfgdir = os.path.join(os.path.dirname(__file__), "../cfg") + localcfg = os.path.expanduser(os.path.join(cfgdir, "memcached/memcached.conf")) + remotecfg = "/etc/memcached.conf" + c.sudo(f"rm -rf {remotecfg}") + c.put(localcfg, "/tmp/memcached.conf") + c.sudo(f"mv /tmp/memcached.conf {remotecfg}") sys_memcached_configure_memory(c) - sys_etc_git_commit(c, 'Configured memcached') - sys_restart_service(c, 'memcached') + sys_etc_git_commit(c, "Configured memcached") + sys_restart_service(c, "memcached") diff --git a/cloudy/sys/mount.py b/cloudy/sys/mount.py index d0f6eb6..288aeac 100644 --- a/cloudy/sys/mount.py +++ b/cloudy/sys/mount.py @@ -1,71 +1,72 @@ from fabric import task -from cloudy.util.context import Context + from cloudy.sys.etc import sys_etc_git_commit +from cloudy.util.context import Context + @task @Context.wrap_context def sys_mount_device_format( - c: Context, device: str, mount_point: str, filesystem: str = 'xfs' + c: Context, device: str, mount_point: str, filesystem: str = "xfs" ) -> None: """Format and mount a device, ensuring it survives reboot.""" if util_mount_is_mounted(c, device): - raise RuntimeError(f'Device ({device}) is already mounted') + raise RuntimeError(f"Device ({device}) is already mounted") util_mount_validate_vars(c, device, mount_point, filesystem) - c.sudo(f'mkfs.{filesystem} -f {device}') + c.sudo(f"mkfs.{filesystem} -f {device}") sys_mount_device(c, device, mount_point, filesystem) sys_mount_fstab_add(c, device, mount_point, filesystem) - sys_etc_git_commit(c, f'Mounted {device} on {mount_point} using {filesystem}') + sys_etc_git_commit(c, f"Mounted {device} on {mount_point} using {filesystem}") + @task @Context.wrap_context -def sys_mount_device( - c: Context, device: str, mount_point: str, filesystem: str = 'xfs' -) -> None: +def sys_mount_device(c: Context, device: str, mount_point: str, filesystem: str = "xfs") -> None: """Mount a device.""" if util_mount_is_mounted(c, device): - raise RuntimeError(f'Device ({device}) is already mounted') + raise RuntimeError(f"Device ({device}) is already mounted") util_mount_validate_vars(c, device, mount_point, filesystem) - c.sudo(f'mount -t {filesystem} {device} {mount_point}') + c.sudo(f"mount -t {filesystem} {device} {mount_point}") + @task @Context.wrap_context -def sys_mount_fstab_add( - c: Context, device: str, mount_point: str, filesystem: str = 'xfs' -) -> None: +def sys_mount_fstab_add(c: Context, device: str, mount_point: str, filesystem: str = "xfs") -> None: """Add a mount record into /etc/fstab.""" util_mount_validate_vars(c, device, mount_point, filesystem) entry = f"{device} {mount_point} {filesystem} noatime 0 0" - c.sudo(f'echo "{entry}" | sudo tee -a /etc/fstab') + c.sudo(f"sh -c 'echo \"{entry}\" >> /etc/fstab'") + @task @Context.wrap_context def util_mount_validate_vars( - c: Context, device: str, mount_point: str, filesystem: str = 'xfs' + c: Context, device: str, mount_point: str, filesystem: str = "xfs" ) -> None: """Check system for device, mount point, and file system.""" # Check if mount point exists, create if not - result = c.run(f'test -d {mount_point}', warn=True) + result = c.run(f"test -d {mount_point}", warn=True) if result.failed: - c.sudo(f'mkdir -p {mount_point}') + c.sudo(f"mkdir -p {mount_point}") # Check if device exists - result = c.run(f'test -e {device}', warn=True) + result = c.run(f"test -e {device}", warn=True) if result.failed: - raise RuntimeError(f'Device ({device}) missing or not attached') + raise RuntimeError(f"Device ({device}) missing or not attached") - if filesystem == 'xfs': - c.sudo('apt-get install -y xfsprogs') + if filesystem == "xfs": + c.sudo("apt-get install -y xfsprogs") + + c.sudo(f"grep -q {filesystem} /proc/filesystems || modprobe {filesystem}") - c.sudo(f'grep -q {filesystem} /proc/filesystems || modprobe {filesystem}') @task @Context.wrap_context def util_mount_is_mounted(c: Context, device: str) -> bool: """Check if a device is already mounted.""" - result = c.run('df', hide=True, warn=True) + result = c.run("df", hide=True, warn=True) return device in result.stdout - diff --git a/cloudy/sys/openvpn.py b/cloudy/sys/openvpn.py index 0102965..0dbfdf1 100644 --- a/cloudy/sys/openvpn.py +++ b/cloudy/sys/openvpn.py @@ -1,8 +1,10 @@ import os + from fabric import task -from cloudy.util.context import Context + +from cloudy.sys.core import sys_mkdir from cloudy.sys.etc import sys_etc_git_commit -from cloudy.sys.core import sys_mkdir, sys_restart_service +from cloudy.util.context import Context @task @@ -10,56 +12,62 @@ def sys_openvpn_docker_install( c: Context, domain: str, - port: str = '1194', - proto: str = 'udp', - passphrase: str = 'nopass', - datadir: str = '/docker/openvpn', - repo: str = 'kylemanna/openvpn' + port: str = "1194", + proto: str = "udp", + passphrase: str = "nopass", + datadir: str = "/docker/openvpn", + repo: str = "kylemanna/openvpn", ) -> None: """Install and initialize OpenVPN in Docker.""" docker_name = f"{proto}-{port}.{domain}" - docker_data = f'{datadir}/{docker_name}' + docker_data = f"{datadir}/{docker_name}" sys_mkdir(c, docker_data) - c.run(f"docker run --rm -v {docker_data}:/etc/openvpn {repo} ovpn_genconfig -u {proto}://{domain}:{port}") + c.run( + f"docker run --rm -v {docker_data}:/etc/openvpn {repo} " + f"ovpn_genconfig -u {proto}://{domain}:{port}" + ) - if passphrase == 'nopass': + if passphrase == "nopass": cmd = f"docker run --rm -v {docker_data}:/etc/openvpn -it {repo} ovpn_initpki nopass" else: cmd = f"docker run --rm -v {docker_data}:/etc/openvpn -it {repo} ovpn_initpki" - # Note: Fabric 2+ does not support interactive prompts natively like Fabric 1.x's settings(prompts=...) - # If you need to handle prompts, consider using pexpect or ensure 'nopass' is used for automation. + # Note: Fabric 2+ does not support interactive prompts natively + # like Fabric 1.x's settings(prompts=...) + # If you need to handle prompts, consider using pexpect or ensure + # 'nopass' is used for automation. c.run(cmd) - c.run(f"docker run -v {docker_data}:/etc/openvpn --name {docker_name} -d -p {port}:1194/{proto} --cap-add=NET_ADMIN {repo}") + c.run( + f"docker run -v {docker_data}:/etc/openvpn --name {docker_name} " + f"-d -p {port}:1194/{proto} --cap-add=NET_ADMIN {repo}" + ) c.run(f"docker update --restart=always {docker_name}") @task @Context.wrap_context def sys_openvpn_docker_conf( - c: Context, - domain: str, - port: str = '1194', - proto: str = 'udp' + c: Context, domain: str, port: str = "1194", proto: str = "udp" ) -> None: """Configure OpenVPN Docker systemd service.""" docker_name = f"{proto}-{port}.{domain}" - cfgdir = os.path.join(os.path.dirname(__file__), '../cfg') - localcfg = os.path.expanduser(os.path.join(cfgdir, 'openvpn/docker-systemd.cfg')) - remotecfg = f'/etc/systemd/system/docker-{docker_name}.service' - c.sudo(f'rm -rf {remotecfg}') - c.put(localcfg, remotecfg) + cfgdir = os.path.join(os.path.dirname(__file__), "../cfg") + localcfg = os.path.expanduser(os.path.join(cfgdir, "openvpn/docker-systemd.cfg")) + remotecfg = f"/etc/systemd/system/docker-{docker_name}.service" + c.sudo(f"rm -rf {remotecfg}") + c.put(localcfg, "/tmp/docker-openvpn.service") + c.sudo(f"mv /tmp/docker-openvpn.service {remotecfg}") # Replace placeholders in the config file c.sudo(f"sed -i 's/docker_port/{port}/g' {remotecfg}") c.sudo(f"sed -i 's/docker_proto/{proto}/g' {remotecfg}") c.sudo(f"sed -i 's/docker_domain/{domain}/g' {remotecfg}") c.sudo(f"sed -i 's/docker_image_name/{docker_name}/g' {remotecfg}") - sys_etc_git_commit(c, f'Configured {docker_name} docker') - c.sudo('systemctl daemon-reload') - c.sudo(f'systemctl enable docker-{docker_name}.service') - c.sudo(f'systemctl start docker-{docker_name}.service') + sys_etc_git_commit(c, f"Configured {docker_name} docker") + c.sudo("systemctl daemon-reload") + c.sudo(f"systemctl enable docker-{docker_name}.service") + c.sudo(f"systemctl start docker-{docker_name}.service") @task @@ -69,30 +77,39 @@ def sys_openvpn_docker_create_client( client_name: str, domain: str, port: int = 1194, - proto: str = 'udp', - passphrase: str = 'nopass', - datadir: str = '/docker/openvpn', - repo: str = 'kylemanna/openvpn' + proto: str = "udp", + passphrase: str = "nopass", + datadir: str = "/docker/openvpn", + repo: str = "kylemanna/openvpn", ) -> None: """Create a new OpenVPN client and fetch its config.""" docker_name = f"{proto}-{port}.{domain}" - docker_data = f'{datadir}/{docker_name}' + docker_data = f"{datadir}/{docker_name}" - if passphrase == 'nopass': - cmd = f"docker run --rm -v {docker_data}:/etc/openvpn -it {repo} easyrsa build-client-full {client_name} nopass" + if passphrase == "nopass": + cmd = ( + f"docker run --rm -v {docker_data}:/etc/openvpn -it {repo} " + f"easyrsa build-client-full {client_name} nopass" + ) else: - cmd = f"docker run --rm -v {docker_data}:/etc/openvpn -it {repo} easyrsa build-client-full {client_name}" + cmd = ( + f"docker run --rm -v {docker_data}:/etc/openvpn -it {repo} " + f"easyrsa build-client-full {client_name}" + ) # See note above about prompts c.run(cmd) - cmd = f"docker run --rm -v {docker_data}:/etc/openvpn {repo} ovpn_getclient {client_name} > /tmp/{client_name}.ovpn" + cmd = ( + f"docker run --rm -v {docker_data}:/etc/openvpn {repo} " + f"ovpn_getclient {client_name} > /tmp/{client_name}.ovpn" + ) c.run(cmd) remote_file = f"/tmp/{client_name}.ovpn" local_file = f"/tmp/{client_name}.ovpn" c.get(remote_file, local_file) - c.run(f'rm {remote_file}') + c.run(f"rm {remote_file}") @task @@ -102,14 +119,14 @@ def sys_openvpn_docker_revoke_client( client_name: str, domain: str, port: int = 1194, - proto: str = 'udp', - passphrase: str = 'nopass', - datadir: str = '/docker/openvpn', - repo: str = 'kylemanna/openvpn' + proto: str = "udp", + passphrase: str = "nopass", + datadir: str = "/docker/openvpn", + repo: str = "kylemanna/openvpn", ) -> None: """Revoke an OpenVPN client.""" docker_name = f"{proto}-{port}.{domain}" - docker_data = f'{datadir}/{docker_name}' + docker_data = f"{datadir}/{docker_name}" cmd = f"docker run --rm -it -v {docker_data}:/etc/openvpn {repo} easyrsa revoke {client_name}" c.run(cmd) @@ -124,13 +141,13 @@ def sys_openvpn_docker_show_client_list( c: Context, domain: str, port: int = 1194, - proto: str = 'udp', - datadir: str = '/docker/openvpn', - repo: str = 'kylemanna/openvpn' + proto: str = "udp", + datadir: str = "/docker/openvpn", + repo: str = "kylemanna/openvpn", ) -> None: """Show the list of OpenVPN clients.""" docker_name = f"{proto}-{port}.{domain}" - docker_data = f'{datadir}/{docker_name}' + docker_data = f"{datadir}/{docker_name}" cmd = f"docker run --rm -it -v {docker_data}:/etc/openvpn {repo} ovpn_listclients" c.run(cmd) diff --git a/cloudy/sys/ports.py b/cloudy/sys/ports.py index a57a1f7..b09d250 100644 --- a/cloudy/sys/ports.py +++ b/cloudy/sys/ports.py @@ -1,20 +1,23 @@ import sys + from fabric import task + from cloudy.util.context import Context + @task @Context.wrap_context -def sys_show_next_available_port(c: Context, start: str = '8181', max_tries: str = '50') -> str: +def sys_show_next_available_port(c: Context, start: str = "8181", max_tries: str = "50") -> str: """ Show the next available TCP port starting from 'start'. Returns the first available port found, or -1 if none found in range. """ port = start for _ in range(int(max_tries)): - result = c.run(f'netstat -lt | grep :{port}', hide=True, warn=True) + result = c.run(f"netstat -lt | grep :{port}", hide=True, warn=True) if not result.stdout.strip(): print(port) return port port = str(int(port) + 1) print(f"No available port found starting from {start}", file=sys.stderr) - return '-1' + return "-1" diff --git a/cloudy/sys/postfix.py b/cloudy/sys/postfix.py index 6488759..845ba07 100644 --- a/cloudy/sys/postfix.py +++ b/cloudy/sys/postfix.py @@ -1,39 +1,49 @@ from fabric import task -from cloudy.util.context import Context + from cloudy.sys.core import sys_restart_service from cloudy.sys.etc import sys_etc_git_commit +from cloudy.util.context import Context + @task @Context.wrap_context def sys_install_postfix(c: Context) -> None: - """ Install postfix for outgoing email (loopback) - Ex: (cmd)""" - + """Install postfix for outgoing email (loopback) - Ex: (cmd)""" + # Method 1: Try fixing debconf permissions and using debconf-set-selections try: - c.sudo('chmod 644 /var/cache/debconf/config.dat || true') - c.sudo('chmod 600 /var/cache/debconf/passwords.dat || true') - c.sudo('chown root:root /var/cache/debconf/*.dat || true') - + c.sudo("chmod 644 /var/cache/debconf/config.dat || true") + c.sudo("chmod 600 /var/cache/debconf/passwords.dat || true") + c.sudo("chown root:root /var/cache/debconf/*.dat || true") + # Ensure debconf-utils is installed - c.sudo('apt update && apt -y install debconf-utils') - + c.sudo("apt update && apt -y install debconf-utils") + # Set debconf selections - c.sudo('echo "postfix postfix/main_mailer_type select Internet Site" | debconf-set-selections') - c.sudo('echo "postfix postfix/mailname string localhost" | debconf-set-selections') - c.sudo('echo "postfix postfix/destinations string localhost.localdomain, localhost" | debconf-set-selections') - + c.sudo( + 'sh -c \'echo "postfix postfix/main_mailer_type select Internet Site" | ' + "debconf-set-selections'" + ) + c.sudo( + "sh -c 'echo \"postfix postfix/mailname string localhost\" | debconf-set-selections'" + ) + c.sudo( + "sh -c 'echo \"postfix postfix/destinations string localhost.localdomain, " + "localhost\" | debconf-set-selections'" + ) + # Install postfix - c.sudo('apt -y install postfix') - + c.sudo("apt -y install postfix") + except Exception: # Method 2: Fallback to non-interactive installation print("Debconf method failed, using non-interactive installation...") - c.sudo('DEBIAN_FRONTEND=noninteractive apt -y install postfix') - + c.sudo("DEBIAN_FRONTEND=noninteractive apt -y install postfix") + # Configure postfix after installation c.sudo('/usr/sbin/postconf -e "inet_interfaces = loopback-only"') c.sudo('/usr/sbin/postconf -e "mydestination = localhost.localdomain, localhost"') c.sudo('/usr/sbin/postconf -e "myhostname = localhost"') - - sys_etc_git_commit(c, 'Installed postfix on loopback for outgoing mail') - sys_restart_service(c, 'postfix') \ No newline at end of file + + sys_etc_git_commit(c, "Installed postfix on loopback for outgoing mail") + sys_restart_service(c, "postfix") diff --git a/cloudy/sys/python.py b/cloudy/sys/python.py index 7cd07c1..ddc89de 100644 --- a/cloudy/sys/python.py +++ b/cloudy/sys/python.py @@ -1,115 +1,108 @@ -from fabric import task -from cloudy.util.context import Context -from cloudy.sys.etc import sys_etc_git_commit - -from typing import Optional -from pathlib import Path -import subprocess import logging +import subprocess -logger = logging.getLogger(__name__) +from fabric import task -from typing import Optional -from pathlib import Path -import subprocess -import logging +from cloudy.sys.etc import sys_etc_git_commit +from cloudy.util.context import Context logger = logging.getLogger(__name__) + @task @Context.wrap_context -def sys_python_install_common(c: Context, py_version: str = '3.11') -> None: +def sys_python_install_common(c: Context, py_version: str = "3.11") -> None: """Install common Python application packages and dependencies. - + Args: c: Fabric context object py_version: Python version to install (default: '3.11') - + Raises: subprocess.CalledProcessError: If package installation fails """ try: # Parse Python version - major_version = py_version.split('.')[0] - + major_version = py_version.split(".")[0] + # Modern package list - removed deprecated packages base_packages = [ - f'python{major_version}-dev', - f'python{major_version}-setuptools', - f'python{major_version}-pip', - f'python{major_version}-venv', # Modern replacement for virtualenv - 'python3-dev', # Keep generic python3-dev - 'build-essential', # Essential build tools - 'pkg-config', + f"python{major_version}-dev", + f"python{major_version}-setuptools", + f"python{major_version}-pip", + f"python{major_version}-venv", # Modern replacement for virtualenv + "python3-dev", # Keep generic python3-dev + "build-essential", # Essential build tools + "pkg-config", ] - + # Image processing libraries (updated versions) image_packages = [ - 'libfreetype6-dev', - 'libjpeg-dev', # Updated from libjpeg62-dev - 'libpng-dev', # Updated from libpng12-dev - 'zlib1g-dev', - 'liblcms2-dev', - 'libwebp-dev', - 'libtiff5-dev', # Added TIFF support - 'libopenjp2-7-dev', # Added JPEG2000 support + "libfreetype6-dev", + "libjpeg-dev", # Updated from libjpeg62-dev + "libpng-dev", # Updated from libpng12-dev + "zlib1g-dev", + "liblcms2-dev", + "libwebp-dev", + "libtiff5-dev", # Added TIFF support + "libopenjp2-7-dev", # Added JPEG2000 support ] - + # System utilities utility_packages = [ - 'gettext', - 'curl', - 'wget', - 'git', # Often needed for pip installs from git + "gettext", + "curl", + "wget", + "git", # Often needed for pip installs from git ] - + all_packages = base_packages + image_packages + utility_packages - package_list = ' '.join(all_packages) - + package_list = " ".join(all_packages) + logger.info(f"Installing Python {py_version} and common packages...") - + # Update package list first - c.sudo('apt update') - + c.sudo("apt update") + # Install packages - c.sudo(f'apt -y install {package_list}') - + c.sudo(f"apt -y install {package_list}") + # Handle PEP 668 externally-managed-environment # Use system packages where possible, pip with --break-system-packages for others - + # Install system Python packages via apt (preferred method) system_python_packages = [ - 'python3-wheel', - 'python3-setuptools', - 'python3-pil', # Pillow via system package + "python3-wheel", + "python3-setuptools", + "python3-pil", # Pillow via system package ] - - system_package_list = ' '.join(system_python_packages) + + system_package_list = " ".join(system_python_packages) logger.info("Installing Python packages via system package manager...") - c.sudo(f'apt -y install {system_package_list}') - + c.sudo(f"apt -y install {system_package_list}") + # For packages not available as system packages, use pip with --break-system-packages # Only do this for essential packages that aren't available via apt - pip_cmd = f'pip{major_version}' if major_version != '2' else 'pip' - + pip_cmd = f"pip{major_version}" if major_version != "2" else "pip" + # Check if psycopg2 is available as system package first try: - c.sudo('apt -y install python3-psycopg2') + c.sudo("apt -y install python3-psycopg2") logger.info("Installed psycopg2 via system package") - except: + except Exception: logger.info("Installing psycopg2-binary via pip (system package not available)") - c.sudo(f'{pip_cmd} install --break-system-packages psycopg2-binary') - + c.sudo(f"{pip_cmd} install --break-system-packages psycopg2-binary") + # Verify installation - c.run(f'python{major_version} --version') - c.run(f'{pip_cmd} --version') - + c.run(f"python{major_version} --version") + c.run(f"{pip_cmd} --version") + logger.info("Python installation completed successfully") - sys_etc_git_commit(c, f'Installed Python {py_version} and common packages') - + sys_etc_git_commit(c, f"Installed Python {py_version} and common packages") + except subprocess.CalledProcessError as e: logger.error(f"Failed to install Python packages: {e}") raise except Exception as e: logger.error(f"Unexpected error during Python installation: {e}") - raise \ No newline at end of file + raise diff --git a/cloudy/sys/redis.py b/cloudy/sys/redis.py index b64db45..b395047 100644 --- a/cloudy/sys/redis.py +++ b/cloudy/sys/redis.py @@ -1,16 +1,20 @@ import os + from fabric import task -from cloudy.util.context import Context -from cloudy.sys.etc import sys_etc_git_commit + from cloudy.sys.core import sys_restart_service +from cloudy.sys.etc import sys_etc_git_commit +from cloudy.util.context import Context + @task @Context.wrap_context def sys_redis_install(c: Context) -> None: """Install redis-server and restart the service.""" - c.sudo('apt -y install redis-server') - sys_etc_git_commit(c, 'Installed redis-server') - sys_restart_service(c, 'redis-server') + c.sudo("apt -y install redis-server") + sys_etc_git_commit(c, "Installed redis-server") + sys_restart_service(c, "redis-server") + @task @Context.wrap_context @@ -19,72 +23,83 @@ def sys_redis_configure_memory(c: Context, memory: int = 0, divider: int = 8) -> Configure redis-server memory. If memory is 0, use total system memory divided by 'divider'. """ - redis_conf = '/etc/redis/redis.conf' + redis_conf = "/etc/redis/redis.conf" if not memory: result = c.run("free -m | awk '/^Mem:/{print $2}'", hide=True) total_mem = int(result.stdout.strip()) memory = total_mem // divider memory_bytes = memory * 1024 * 1024 c.sudo(f'sed -i "s/^maxmemory .*/maxmemory {memory_bytes}/" {redis_conf}') - sys_etc_git_commit(c, f'Configured redis-server (memory={memory_bytes})') - sys_restart_service(c, 'redis-server') + sys_etc_git_commit(c, f"Configured redis-server (memory={memory_bytes})") + sys_restart_service(c, "redis-server") + @task @Context.wrap_context -def sys_redis_configure_port(c: Context, port: str = '6379') -> None: +def sys_redis_configure_port(c: Context, port: str = "6379") -> None: """Configure redis-server port.""" - redis_conf = '/etc/redis/redis.conf' + redis_conf = "/etc/redis/redis.conf" c.sudo(f'sed -i "s/^port .*/port {port}/" {redis_conf}') - sys_etc_git_commit(c, f'Configured redis-server (port={port})') - sys_restart_service(c, 'redis-server') + sys_etc_git_commit(c, f"Configured redis-server (port={port})") + sys_restart_service(c, "redis-server") + @task @Context.wrap_context -def sys_redis_configure_interface(c: Context, interface: str = '0.0.0.0') -> None: +def sys_redis_configure_interface(c: Context, interface: str = "0.0.0.0") -> None: """Configure redis-server bind interface.""" - redis_conf = '/etc/redis/redis.conf' + redis_conf = "/etc/redis/redis.conf" c.sudo(f'sed -i "s/^bind .*/bind {interface}/" {redis_conf}') - sys_etc_git_commit(c, f'Configured redis-server (interface={interface})') - sys_restart_service(c, 'redis-server') + sys_etc_git_commit(c, f"Configured redis-server (interface={interface})") + sys_restart_service(c, "redis-server") + @task @Context.wrap_context -def sys_redis_configure_db_file(c: Context, path: str = '/var/lib/redis', dump: str = 'dump.rdb') -> None: +def sys_redis_configure_db_file( + c: Context, path: str = "/var/lib/redis", dump: str = "dump.rdb" +) -> None: """Configure redis-server dump file and directory.""" - redis_conf = '/etc/redis/redis.conf' + redis_conf = "/etc/redis/redis.conf" c.sudo(f"sed -i '/^dir /d' {redis_conf}") - c.sudo(f'echo "dir {path}" | sudo tee -a {redis_conf}') + c.sudo(f"sh -c 'echo \"dir {path}\" >> {redis_conf}'") c.sudo(f"sed -i '/^dbfilename /d' {redis_conf}") - c.sudo(f'echo "dbfilename {dump}" | sudo tee -a {redis_conf}') - sys_etc_git_commit(c, f'Configured redis-server (dir={path}, dumpfile={dump})') - sys_restart_service(c, 'redis-server') + c.sudo(f"sh -c 'echo \"dbfilename {dump}\" >> {redis_conf}'") + sys_etc_git_commit(c, f"Configured redis-server (dir={path}, dumpfile={dump})") + sys_restart_service(c, "redis-server") + @task @Context.wrap_context -def sys_redis_configure_pass(c: Context, password: str = '') -> None: +def sys_redis_configure_pass(c: Context, password: str = "") -> None: """Set or remove redis-server password.""" - redis_conf = '/etc/redis/redis.conf' + redis_conf = "/etc/redis/redis.conf" c.sudo(f"sed -i '/^requirepass /d' {redis_conf}") if password: - c.sudo(f'echo "requirepass {password}" | sudo tee -a {redis_conf}') - sys_etc_git_commit(c, 'Configured redis-server (password set)' if password else 'Configured redis-server (password removed)') - sys_restart_service(c, 'redis-server') + c.sudo(f"sh -c 'echo \"requirepass {password}\" >> {redis_conf}'") + sys_etc_git_commit( + c, + ( + "Configured redis-server (password set)" + if password + else "Configured redis-server (password removed)" + ), + ) + sys_restart_service(c, "redis-server") + @task @Context.wrap_context def sys_redis_config(c: Context) -> None: """Replace redis.conf with local config and reconfigure memory.""" - cfgdir = os.path.abspath(os.path.join(os.path.dirname(__file__), '../cfg/redis/redis.conf')) - remotecfg = '/etc/redis/redis.conf' + cfgdir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../cfg/redis/redis.conf")) + remotecfg = "/etc/redis/redis.conf" if os.path.exists(cfgdir): - c.sudo(f'rm -f {remotecfg}') - c.put(cfgdir, remotecfg) + c.sudo(f"rm -f {remotecfg}") + c.put(cfgdir, "/tmp/redis.conf") + c.sudo(f"mv /tmp/redis.conf {remotecfg}") sys_redis_configure_memory(c) - sys_etc_git_commit(c, 'Configured redis-server') - sys_restart_service(c, 'redis-server') + sys_etc_git_commit(c, "Configured redis-server") + sys_restart_service(c, "redis-server") else: - print(f'Local redis config not found: {cfgdir}') - - - - + print(f"Local redis config not found: {cfgdir}") diff --git a/cloudy/sys/security.py b/cloudy/sys/security.py index 8d7ce49..5529959 100644 --- a/cloudy/sys/security.py +++ b/cloudy/sys/security.py @@ -1,19 +1,17 @@ from fabric import task -from cloudy.util.context import Context + from cloudy.sys.etc import sys_etc_git_commit +from cloudy.util.context import Context + @task @Context.wrap_context def sys_security_install_common(c: Context) -> None: """Install common security applications.""" requirements = [ - 'fail2ban', - 'logcheck', - 'logcheck-database', + "fail2ban", + "logcheck", + "logcheck-database", ] c.sudo(f'apt -y install {" ".join(requirements)}') - sys_etc_git_commit(c, 'Installed common security packages') - - - - + sys_etc_git_commit(c, "Installed common security packages") diff --git a/cloudy/sys/ssh.py b/cloudy/sys/ssh.py index 5221815..55c582d 100644 --- a/cloudy/sys/ssh.py +++ b/cloudy/sys/ssh.py @@ -1,98 +1,121 @@ import os + from fabric import task -from cloudy.util.context import Context + +from cloudy.sys.core import sys_reload_service, sys_restart_service from cloudy.sys.etc import sys_etc_git_commit -from cloudy.sys.core import sys_reload_service +from cloudy.util.context import Context @task @Context.wrap_context -def sys_ssh_set_port(c: Context, port: str = '22') -> None: +def sys_ssh_set_port(c: Context, port: str = "22") -> None: """Set SSH port.""" - sshd_config = '/etc/ssh/sshd_config' + sshd_config = "/etc/ssh/sshd_config" c.sudo(f"sed -i 's/^#*Port .*/Port {port}/' {sshd_config}") - sys_etc_git_commit(c, f'Configured ssh (Port={port})') - sys_reload_service(c, 'ssh') + sys_etc_git_commit(c, f"Configured ssh (Port={port})") + # SSH port changes require restart, not just reload + sys_restart_service(c, "ssh") + # Give SSH a moment to fully restart + c.run("sleep 2") @task @Context.wrap_context def sys_ssh_disable_root_login(c: Context) -> None: """Disable root login.""" - sshd_config = '/etc/ssh/sshd_config' + sshd_config = "/etc/ssh/sshd_config" c.sudo(f"sed -i 's/^#*PermitRootLogin .*/PermitRootLogin no/' {sshd_config}") - c.sudo('passwd -l root') - sys_etc_git_commit(c, 'Disabled root login') - sys_reload_service(c, 'ssh') + c.sudo("passwd -l root") + sys_etc_git_commit(c, "Disabled root login") + sys_reload_service(c, "ssh") @task @Context.wrap_context def sys_ssh_enable_root_login(c: Context) -> None: """Enable root login.""" - sshd_config = '/etc/ssh/sshd_config' + sshd_config = "/etc/ssh/sshd_config" c.sudo(f"sed -i 's/^#*PermitRootLogin .*/PermitRootLogin yes/' {sshd_config}") - sys_etc_git_commit(c, 'Enabled root login') - sys_reload_service(c, 'ssh') + sys_etc_git_commit(c, "Enabled root login") + sys_reload_service(c, "ssh") @task @Context.wrap_context def sys_ssh_enable_password_authentication(c: Context) -> None: """Enable password authentication.""" - sshd_config = '/etc/ssh/sshd_config' + sshd_config = "/etc/ssh/sshd_config" c.sudo(f"sed -i 's/^#*PasswordAuthentication .*/PasswordAuthentication yes/' {sshd_config}") - sys_etc_git_commit(c, 'Enable password authentication') - sys_reload_service(c, 'ssh') + sys_etc_git_commit(c, "Enable password authentication") + sys_reload_service(c, "ssh") @task @Context.wrap_context def sys_ssh_disable_password_authentication(c: Context) -> None: """Disable password authentication.""" - sshd_config = '/etc/ssh/sshd_config' + sshd_config = "/etc/ssh/sshd_config" c.sudo(f"sed -i 's/^#*PasswordAuthentication .*/PasswordAuthentication no/' {sshd_config}") - sys_etc_git_commit(c, 'Disable password authentication') - sys_reload_service(c, 'ssh') + sys_etc_git_commit(c, "Disable password authentication") + sys_reload_service(c, "ssh") @task @Context.wrap_context -def sys_ssh_push_public_key(c: Context, user: str, pub_key: str = '~/.ssh/id_rsa.pub') -> None: +def sys_ssh_push_public_key(c: Context, user: str, pub_key: str = "~/.ssh/id_rsa.pub") -> None: """Install a public key on the remote server for a user.""" - home_dir = '~' if user == 'root' else f'/home/{user}' - ssh_dir = f'{home_dir}/.ssh' - auth_key = f'{ssh_dir}/authorized_keys' + home_dir = "~" if user == "root" else f"/home/{user}" + ssh_dir = f"{home_dir}/.ssh" + auth_key = f"{ssh_dir}/authorized_keys" pub_key = os.path.expanduser(pub_key) if not os.path.exists(pub_key): - raise FileNotFoundError(f'Public key not found: {pub_key}') - c.sudo(f'mkdir -p {ssh_dir}') - c.put(pub_key, '/tmp/tmpkey') - c.sudo(f'cat /tmp/tmpkey >> {auth_key}') - c.sudo('rm -f /tmp/tmpkey') - c.sudo(f'chown -R {user}:{user} {ssh_dir}') - c.sudo(f'chmod 700 {ssh_dir}') - c.sudo(f'chmod 600 {auth_key}') + raise FileNotFoundError(f"Public key not found: {pub_key}") + c.sudo(f"mkdir -p {ssh_dir}") + c.put(pub_key, "/tmp/tmpkey") + c.sudo(f"sh -c 'cat /tmp/tmpkey >> {auth_key}'") + c.sudo("rm -f /tmp/tmpkey") + c.sudo(f"chown -R {user}:{user} {ssh_dir}") + c.sudo(f"chmod 700 {ssh_dir}") + c.sudo(f"chmod 600 {auth_key}") @task @Context.wrap_context -def sys_ssh_push_server_shared_keys(c: Context, user: str, shared_dir: str = '~/.ssh/shared/ssh/') -> None: +def sys_ssh_push_server_shared_keys( + c: Context, user: str, shared_dir: str = "~/.ssh/shared/ssh/" +) -> None: """Install shared SSH keys for a user (e.g., for GitHub access).""" - home_dir = '~' if user == 'root' else f'/home/{user}' + home_dir = "~" if user == "root" else f"/home/{user}" key_dir = os.path.expanduser(shared_dir) - pri_key = os.path.join(key_dir, 'id_rsa') - pub_key = os.path.join(key_dir, 'id_rsa.pub') + pri_key = os.path.join(key_dir, "id_rsa") + pub_key = os.path.join(key_dir, "id_rsa.pub") for key in (pri_key, pub_key): if not os.path.exists(key): - raise FileNotFoundError(f'Missing key file: {key}') - remote_ssh_dir = f'{home_dir}/.ssh' - c.sudo(f'mkdir -p {remote_ssh_dir}') - c.put(pri_key, f'{remote_ssh_dir}/id_rsa') - c.put(pub_key, f'{remote_ssh_dir}/id_rsa.pub') - c.sudo(f'chown -R {user}:{user} {remote_ssh_dir}') - c.sudo(f'chmod 700 {remote_ssh_dir}') - c.sudo(f'chmod 600 {remote_ssh_dir}/id_rsa') - c.sudo(f'chmod 644 {remote_ssh_dir}/id_rsa.pub') - - + raise FileNotFoundError(f"Missing key file: {key}") + remote_ssh_dir = f"{home_dir}/.ssh" + c.sudo(f"mkdir -p {remote_ssh_dir}") + c.put(pri_key, f"{remote_ssh_dir}/id_rsa") + c.put(pub_key, f"{remote_ssh_dir}/id_rsa.pub") + c.sudo(f"chown -R {user}:{user} {remote_ssh_dir}") + c.sudo(f"chmod 700 {remote_ssh_dir}") + c.sudo(f"chmod 600 {remote_ssh_dir}/id_rsa") + c.sudo(f"chmod 644 {remote_ssh_dir}/id_rsa.pub") + + +def validate_ssh_config(ssh_port: str) -> None: + """ + Validate SSH configuration values. + + Args: + ssh_port: SSH port to validate + + Raises: + ValueError: If validation fails + """ + try: + port_num = int(ssh_port) + if not (1 <= port_num <= 65535): + raise ValueError(f"ssh-port must be between 1-65535, got: {ssh_port}") + except ValueError as exc: + raise ValueError(f"ssh-port must be a valid integer, got: {ssh_port}") from exc diff --git a/cloudy/sys/swap.py b/cloudy/sys/swap.py index 096637d..5a2346c 100644 --- a/cloudy/sys/swap.py +++ b/cloudy/sys/swap.py @@ -1,24 +1,27 @@ import sys + from fabric import task -from cloudy.util.context import Context + from cloudy.sys.etc import sys_etc_git_commit +from cloudy.util.context import Context + @task @Context.wrap_context -def sys_swap_configure(c: Context, size: str = '512') -> None: +def sys_swap_configure(c: Context, size: str = "512") -> None: """ Create and install a swap file of the given size in MB. """ - swap_file = f'/swap/{size}MiB.swap' - c.sudo('mkdir -p /swap') + swap_file = f"/swap/{size}MiB.swap" + c.sudo("mkdir -p /swap") # Check if swap file exists - result = c.run(f'test -e {swap_file}', warn=True) + result = c.run(f"test -e {swap_file}", warn=True) if result.failed: - c.sudo(f'fallocate -l {size}m {swap_file}') - c.sudo(f'chmod 600 {swap_file}') - c.sudo(f'mkswap {swap_file}') - c.sudo(f'swapon {swap_file}') - c.sudo(f'echo "{swap_file} swap swap defaults 0 0" | sudo tee -a /etc/fstab') - sys_etc_git_commit(c, f'Added swap file ({swap_file})') + c.sudo(f"fallocate -l {size}m {swap_file}") + c.sudo(f"chmod 600 {swap_file}") + c.sudo(f"mkswap {swap_file}") + c.sudo(f"swapon {swap_file}") + c.sudo(f"sh -c 'echo \"{swap_file} swap swap defaults 0 0\" >> /etc/fstab'") + sys_etc_git_commit(c, f"Added swap file ({swap_file})") else: - print(f'Swap file ({swap_file}) exists', file=sys.stderr) + print(f"Swap file ({swap_file}) exists", file=sys.stderr) diff --git a/cloudy/sys/timezone.py b/cloudy/sys/timezone.py index 915ff4a..859ca0d 100644 --- a/cloudy/sys/timezone.py +++ b/cloudy/sys/timezone.py @@ -1,33 +1,38 @@ import os import sys + from fabric import task -from cloudy.util.context import Context + from cloudy.sys.etc import sys_etc_git_commit +from cloudy.util.context import Context + @task @Context.wrap_context def sys_time_install_common(c: Context) -> None: """Install common time/zone related packages.""" - requirements = ['ntpsec', 'ntpdate'] + requirements = ["ntpsec", "ntpdate"] c.sudo(f'apt -y install {" ".join(requirements)}') sys_configure_ntp(c) - sys_etc_git_commit(c, 'Installed time/zone related system packages') + sys_etc_git_commit(c, "Installed time/zone related system packages") + @task @Context.wrap_context -def sys_configure_timezone(c: Context, zone: str = 'Canada/Eastern') -> None: +def sys_configure_timezone(c: Context, zone: str = "Canada/Eastern") -> None: """Configure system time zone.""" - zone_path = os.path.abspath(os.path.join('/usr/share/zoneinfo', zone)) - result = c.run(f'test -e {zone_path}', warn=True) + zone_path = os.path.abspath(os.path.join("/usr/share/zoneinfo", zone)) + result = c.run(f"test -e {zone_path}", warn=True) if result.ok: - c.sudo(f'ln -sf {zone_path} /etc/localtime') - sys_etc_git_commit(c, f'Updated system timezone to ({zone})') + c.sudo(f"ln -sf {zone_path} /etc/localtime") + sys_etc_git_commit(c, f"Updated system timezone to ({zone})") else: - print(f'Zone not found {zone_path}', file=sys.stderr) + print(f"Zone not found {zone_path}", file=sys.stderr) + @task @Context.wrap_context def sys_configure_ntp(c: Context) -> None: """Configure NTP with a daily sync cron job.""" - cron_line = '59 23 * * * /usr/sbin/ntpdate ntp.ubuntu.com > /dev/null' - c.sudo(f'echo "{cron_line}" | sudo tee -a /var/spool/cron/crontabs/root') \ No newline at end of file + cron_line = "59 23 * * * /usr/sbin/ntpdate ntp.ubuntu.com > /dev/null" + c.sudo(f"sh -c 'echo \"{cron_line}\" >> /var/spool/cron/crontabs/root'") diff --git a/cloudy/sys/user.py b/cloudy/sys/user.py index 9988cad..7ded76f 100644 --- a/cloudy/sys/user.py +++ b/cloudy/sys/user.py @@ -1,18 +1,21 @@ import sys + from fabric import task -from cloudy.util.context import Context + from cloudy.sys.etc import sys_etc_git_commit +from cloudy.util.context import Context + @task @Context.wrap_context def sys_user_delete(c: Context, username: str) -> None: """Delete a user (except root).""" - if username == 'root': - print('Cannot delete root user', file=sys.stderr) + if username == "root": + print("Cannot delete root user", file=sys.stderr) return - c.sudo(f'pkill -KILL -u {username}', warn=True) - c.sudo(f'userdel {username}', warn=True) - sys_etc_git_commit(c, f'Deleted user({username})') + c.sudo(f"pkill -KILL -u {username}", warn=True) + c.sudo(f"userdel {username}", warn=True) + sys_etc_git_commit(c, f"Deleted user({username})") @task @@ -21,15 +24,29 @@ def sys_user_add(c: Context, username: str) -> None: """Add a new user, deleting any existing user with the same name.""" sys_user_delete(c, username) c.sudo(f'useradd --create-home --shell "/bin/bash" {username}', warn=True) - sys_etc_git_commit(c, f'Added user({username})') + sys_etc_git_commit(c, f"Added user({username})") @task @Context.wrap_context def sys_user_add_sudoer(c: Context, username: str) -> None: """Add user to sudoers.""" - c.sudo(f'echo "{username} ALL=(ALL:ALL) ALL" | sudo tee -a /etc/sudoers') - sys_etc_git_commit(c, f'Added user to sudoers - ({username})') + c.sudo(f"sh -c 'echo \"{username} ALL=(ALL:ALL) ALL\" >> /etc/sudoers'") + sys_etc_git_commit(c, f"Added user to sudoers - ({username})") + + +@task +@Context.wrap_context +def sys_user_add_passwordless_sudoer(c: Context, username: str) -> None: + """ + Add user to sudoers with passwordless sudo access. + + WARNING: This is a security risk! Use only for automation accounts + or in highly controlled environments. Passwordless sudo means any + compromise of this user account = instant root access. + """ + c.sudo(f"sh -c 'echo \"{username} ALL=(ALL:ALL) NOPASSWD:ALL\" >> /etc/sudoers'") + sys_etc_git_commit(c, f"Added user to passwordless sudoers - ({username})") @task @@ -37,22 +54,22 @@ def sys_user_add_sudoer(c: Context, username: str) -> None: def sys_user_remove_sudoer(c: Context, username: str) -> None: """Remove user from sudoers.""" c.sudo(f"sed -i '/\\s*{username}\\s*.*/d' /etc/sudoers") - sys_etc_git_commit(c, f'Removed user from sudoers - ({username})') + sys_etc_git_commit(c, f"Removed user from sudoers - ({username})") @task @Context.wrap_context def sys_user_add_to_group(c: Context, username: str, group: str) -> None: """Add user to an existing group.""" - c.sudo(f'usermod -a -G {group} {username}', warn=True) - sys_etc_git_commit(c, f'Added user ({username}) to group ({group})') + c.sudo(f"usermod -a -G {group} {username}", warn=True) + sys_etc_git_commit(c, f"Added user ({username}) to group ({group})") @task @Context.wrap_context def sys_user_add_to_groups(c: Context, username: str, groups: str) -> None: """Add user to multiple groups (comma-separated).""" - for group in [g.strip() for g in groups.split(',') if g.strip()]: + for group in [g.strip() for g in groups.split(",") if g.strip()]: sys_user_add_to_group(c, username, group) @@ -60,15 +77,15 @@ def sys_user_add_to_groups(c: Context, username: str, groups: str) -> None: @Context.wrap_context def sys_user_create_group(c: Context, group: str) -> None: """Create a new group.""" - c.sudo(f'addgroup {group}', warn=True) - sys_etc_git_commit(c, f'Created a new group ({group})') + c.sudo(f"addgroup {group}", warn=True) + sys_etc_git_commit(c, f"Created a new group ({group})") @task @Context.wrap_context def sys_user_create_groups(c: Context, groups: str) -> None: """Create multiple groups (comma-separated).""" - for group in [g.strip() for g in groups.split(',') if g.strip()]: + for group in [g.strip() for g in groups.split(",") if g.strip()]: sys_user_create_group(c, group) @@ -76,36 +93,82 @@ def sys_user_create_groups(c: Context, groups: str) -> None: @Context.wrap_context def sys_user_remove_from_group(c: Context, username: str, group: str) -> None: """Remove a user from a group.""" - c.sudo(f'deluser {username} {group}') - sys_etc_git_commit(c, f'Removed user ({username}) from group ({group})') + c.sudo(f"deluser {username} {group}") + sys_etc_git_commit(c, f"Removed user ({username}) from group ({group})") @task @Context.wrap_context -def sys_user_set_group_umask(c: Context, username: str, umask: str = '0002') -> None: +def sys_user_set_group_umask(c: Context, username: str, umask: str = "0002") -> None: """Set user umask in .bashrc.""" - bashrc = f'/home/{username}/.bashrc' + bashrc = f"/home/{username}/.bashrc" c.sudo(f"sed -i '/\\s*umask\\s*.*/d' {bashrc}") c.sudo(f"sed -i '1iumask {umask}' {bashrc}") - sys_etc_git_commit(c, f'Added umask ({umask}) to user ({username})') + sys_etc_git_commit(c, f"Added umask ({umask}) to user ({username})") @task @Context.wrap_context def sys_user_change_password(c: Context, username: str, password: str) -> None: """Change password for a user.""" - c.sudo(f'echo "{username}:{password}" | chpasswd') - sys_etc_git_commit(c, f'Password changed for user ({username})') + c.sudo(f"sh -c 'echo \"{username}:{password}\" | chpasswd'") + sys_etc_git_commit(c, f"Password changed for user ({username})") @task @Context.wrap_context def sys_user_set_pip_cache_dir(c: Context, username: str) -> None: """Set cache dir for pip for a given user.""" - bashrc = f'/home/{username}/.bashrc' - cache_dir = '/srv/www/.pip_cache_dir' - c.sudo(f'mkdir -p {cache_dir}') - c.sudo(f'chown -R :www-data {cache_dir}') - c.sudo(f'chmod -R ug+wrx {cache_dir}') + bashrc = f"/home/{username}/.bashrc" + cache_dir = "/srv/www/.pip_cache_dir" + c.sudo(f"mkdir -p {cache_dir}") + c.sudo(f"chown -R :www-data {cache_dir}") + c.sudo(f"chmod -R ug+wrx {cache_dir}") c.sudo(f"sed -i '/\\s*PIP_DOWNLOAD_CACHE\\s*.*/d' {bashrc}") c.sudo(f"sed -i '1iexport PIP_DOWNLOAD_CACHE={cache_dir}' {bashrc}") + + +def sys_user_create_with_setup( + c: Context, user_name: str, password: str, groups: str, shared_key_dir: str = "" +) -> None: + """ + Create a user with full setup including groups, sudoer, and SSH keys. + + Args: + c: Fabric context + user_name: Username to create + password: Password for the user + groups: Comma-separated list of groups to add user to + shared_key_dir: Optional path to shared SSH keys directory + """ + if not user_name or not password: + return + + sys_user_add(c, user_name) + sys_user_change_password(c, user_name, password) + sys_user_add_sudoer(c, user_name) + sys_user_set_group_umask(c, user_name) + sys_user_create_groups(c, groups) + sys_user_add_to_groups(c, user_name, groups) + + # Set up SSH keys if configured + if shared_key_dir: + # Import here to avoid circular imports + from cloudy.sys import ssh + + ssh.sys_ssh_push_server_shared_keys(c, user_name, shared_key_dir) + + +def validate_user_config(username: str, password: str) -> None: + """ + Validate user configuration values. + + Args: + username: Username to validate + password: Password to validate + + Raises: + ValueError: If validation fails + """ + if username and not password: + raise ValueError(f"User '{username}' specified but password is missing") diff --git a/cloudy/sys/vim.py b/cloudy/sys/vim.py index c83560e..62c5398 100644 --- a/cloudy/sys/vim.py +++ b/cloudy/sys/vim.py @@ -1,20 +1,16 @@ from fabric import task -from cloudy.util.context import Context + from cloudy.sys.etc import sys_etc_git_commit +from cloudy.util.context import Context + @task @Context.wrap_context def sys_set_default_editor(c: Context, default: int = 3) -> None: """ Set the default editor using update-alternatives. - :param default: The selection number for the editor (as shown by update-alternatives --config editor). + :param default: The selection number for the editor + (as shown by update-alternatives --config editor). """ - c.sudo(f'echo {default} | update-alternatives --config editor') - sys_etc_git_commit(c, f'Set default editor to ({default})') - - - - - - - + c.sudo(f"sh -c 'echo {default} | update-alternatives --config editor'") + sys_etc_git_commit(c, f"Set default editor to ({default})") diff --git a/cloudy/util/conf.py b/cloudy/util/conf.py index a6e172a..db0953c 100644 --- a/cloudy/util/conf.py +++ b/cloudy/util/conf.py @@ -1,17 +1,19 @@ -import os import configparser import logging +import os from typing import Any, Dict, Optional LOG_LEVEL = logging.INFO FORMAT = "%(levelname)-10s %(name)s %(message)s" logging.basicConfig(format=FORMAT, level=LOG_LEVEL) + class CloudyConfig: """ CloudyConfig loads and manages configuration from multiple files. The last file in the list has the highest precedence. """ + def __init__(self, filenames: Any = None, log_level: int = logging.WARNING) -> None: self.log = logging.getLogger(os.path.basename(__file__)) self.log.setLevel(log_level) @@ -22,26 +24,32 @@ def __init__(self, filenames: Any = None, log_level: int = logging.WARNING) -> N paths: list[str] = [] # 1. Config file in current directory - cwd_path = os.path.abspath('./.cloudy') + cwd_path = os.path.abspath("./.cloudy") if os.path.exists(cwd_path): paths.append(cwd_path) # 2. Config file in home directory - home_path = os.path.expanduser('~/.cloudy') + home_path = os.path.expanduser("~/.cloudy") if os.path.exists(home_path): paths.append(home_path) # 3. Explicitly passed config file(s) if filenames: if isinstance(filenames, str): - filenames = [filenames] + # Handle comma-separated paths + if "," in filenames: + filenames = [f.strip() for f in filenames.split(",")] + else: + filenames = [filenames] for f in filenames: p = os.path.expanduser(f) if os.path.exists(p) and p not in paths: paths.append(p) # 4. Defaults file (lowest precedence) - defaults_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../cfg/defaults.cfg")) + defaults_path = os.path.abspath( + os.path.join(os.path.dirname(__file__), "../cfg/defaults.cfg") + ) if os.path.exists(defaults_path): paths.insert(0, defaults_path) @@ -69,7 +77,7 @@ def _section_map(self, section: str) -> Dict[str, Optional[str]]: valid[option] = None return valid - def get_variable(self, section: str, variable: str, fallback: str = '') -> str: + def get_variable(self, section: str, variable: str, fallback: str = "") -> str: """ Get a variable value from a section, with optional fallback. Section is case-insensitive. @@ -91,11 +99,23 @@ def add_variable_to_environ(self, section: str, variable: str) -> None: else: self.log.warning(f"No such variable ({variable}) in section [{section}]") except Exception as e: - self.log.warning(f"Failed to set environment variable ({variable}) from section [{section}]: {e}") - - - + self.log.warning( + f"Failed to set environment variable ({variable}) from section [{section}]: {e}" + ) + def get_boolean_config(self, section: str, key: str, default: bool = False) -> bool: + """ + Get a boolean configuration value from a section. + Accepts various formats: YES/NO, TRUE/FALSE, 1/0, ON/OFF (case-insensitive) + Args: + section: Configuration section name + key: Configuration key name + default: Default value if key not found + Returns: + Boolean value + """ + value = self.get_variable(section, key, "").upper() + return value in ("YES", "TRUE", "1", "ON") diff --git a/cloudy/util/context.py b/cloudy/util/context.py index 14b2049..c4c717d 100644 --- a/cloudy/util/context.py +++ b/cloudy/util/context.py @@ -1,9 +1,14 @@ +"""Enhanced Fabric Context with smart output control and SSH reconnection.""" + import logging +import os +import re import sys -from colorama import Fore, Style from functools import wraps +from typing import Callable, List + +from colorama import Fore, Style from fabric import Connection -from typing import Callable, Optional, Dict, Any logger = logging.getLogger("fab-commands") logger.setLevel(logging.INFO) @@ -13,20 +18,234 @@ logger.handlers = [handler] logger.propagate = False +# Commands that should ALWAYS show output (informational/status commands) +ALWAYS_SHOW_OUTPUT: List[str] = [ + "ufw status", + "systemctl status", + "service status", + "df", + "free", + "ps", + "netstat", + "iptables -L", + "lsblk", + "mount", + "who", + "w", + "uptime", + "date", + "psql --version", + "pg_lsclusters", + "apache2ctl status", + "nginx -t", + "docker ps", + "docker images", + "git status", + "git log", + "git diff", + "tail", + "head", + "cat", + "less", + "more", + "ls -la", + "find", + "grep", + "awk", + "sed -n", # when used for display + "echo", + "printf", + "hostname", + "uname", + "id", + "whoami", + "pwd", + "which", + "whereis", +] + +# Commands that are typically "noisy" and should be hidden by default (regex patterns) +HIDE_BY_DEFAULT_PATTERNS: List[str] = [ + # Package management + r"apt.*update", + r"apt.*install", + r"apt.*upgrade", + r"apt.*remove", + r"apt.*autoremove", + r"apt.*list\s+--upgradable", + r"apt-get.*update", + r"apt-get.*install", + r"apt-get.*upgrade", + r"yum.*install", + r"yum.*update", + r"dnf.*install", + r"dpkg\s+-i", + r"dpkg-reconfigure", + r"rpm\s+-i", + # Downloads and archives + r"wget\s+", + r"curl\s+-.*", # downloading + r"unzip\s+", + r"tar\s+-[xz]", + r"g?zip\s+", + r"bunzip2?\s+", + # Build tools + r"make\s+", + r"cmake\s+", + r"pip.*install", + r"npm.*install", + r"yarn.*install", + r"composer.*install", + r"bundle.*install", + r"mvn.*install", + r"gradle.*build", + r"go.*build", + r"cargo.*build", + # System configuration and services + r"systemctl\s+(start|stop|reload|restart)", + r"service\s+.*\s+(start|stop|reload|restart)", + r"update-alternatives", + r"debconf-set-selections", + r"passwd\s+", + r"chpasswd", + # File operations + r"chmod\s+", + r"chown\s+", + r"mkdir\s+-p", + r"ln\s+-sf", + r"mv\s+", + r"cp\s+", + r"rm\s+", + r"sed\s+-i", + r"sh\s+-c.*>", # shell redirects + r"echo.*>", # redirect operations + r"cat.*>>", # append operations + # SSH and network + r"ssh-keygen", + r"scp\s+", + # Database operations + r"pg_createcluster", + r"pg_dropcluster", + r"pg_ctlcluster", + r"createdb\s+", + r"dropdb\s+", + r"createuser\s+", + r"dropuser\s+", + r"pg_dump\s+", + r"pg_restore\s+", + r"psql\s+-c", # psql commands (but not interactive psql) + r"mysqldump\s+", + r"mysql\s+-e", # mysql commands +] + + class Context(Connection): + """ + Enhanced Fabric Connection with smart output control and SSH reconnection. + + Provides intelligent command output filtering, automatic password handling, + and robust SSH port reconnection for server automation tasks. + """ + + @property + def verbose(self) -> bool: + """Check if verbose output is enabled via environment variable or config.""" + # Check environment variable for verbose mode + if os.environ.get("CLOUDY_VERBOSE", "").lower() in ("1", "true", "yes"): + return True + + # Check Fabric's built-in debug flag (--debug enables verbose too) + if hasattr(self.config, "run") and getattr(self.config.run, "echo", False): + return True + + return getattr(self.config, "cloudy_verbose", False) or getattr( + self.config, "cloudy_debug", False + ) + + @property + def debug(self) -> bool: + """Check if debug output is enabled via Fabric's built-in debug flag.""" + # Check Fabric's built-in debug config + if hasattr(self.config, "run") and getattr(self.config.run, "echo", False): + return True + + return getattr(self.config, "cloudy_debug", False) + + def _should_show_output(self, command: str) -> bool: + """Determine if command output should be shown based on command type.""" + # Debug mode: show everything + if self.debug: + return True + + # Verbose mode: show everything + if self.verbose: + return True + + cmd_lower = command.lower().strip() + + # Hide noisy commands by default FIRST (regex matching) + for pattern in HIDE_BY_DEFAULT_PATTERNS: + if re.search(pattern, cmd_lower): + return False + + # Always show output for informational commands (substring matching) + for pattern in ALWAYS_SHOW_OUTPUT: + if pattern in cmd_lower: + return True + + # For other commands, show output (conservative approach) + return True + def run(self, command, *args, **kwargs): print(f"\n{Fore.CYAN}### {command}\n-----------{Style.RESET_ALL}", flush=True) - kwargs.setdefault("hide", False) + + show_output = self._should_show_output(command) + kwargs.setdefault("hide", not show_output) kwargs.setdefault("pty", True) - return super().run(command, *args, **kwargs) + + result = super().run(command, *args, **kwargs) + + # Only show success/failure indicators for commands where we hid the output + if not show_output: + if result.failed: + print(f"{Fore.RED}❌ FAILED{Style.RESET_ALL}") + if result.stderr: + print(f"Error: {result.stderr.strip()}") + elif result.stdout: + print(f"Output: {result.stdout.strip()}") + else: + print(f"{Fore.GREEN}✅ SUCCESS{Style.RESET_ALL}") + + return result def sudo(self, command, *args, **kwargs): print(f"\n{Fore.YELLOW}### {command}\n-----------{Style.RESET_ALL}", flush=True) - kwargs.setdefault("hide", False) + + # Check for environment variable and set it if config is None + env_password = os.environ.get("INVOKE_SUDO_PASSWORD") + if hasattr(self.config, "sudo") and not self.config.sudo.password and env_password: + self.config.sudo.password = env_password + + show_output = self._should_show_output(command) + kwargs.setdefault("hide", not show_output) kwargs.setdefault("pty", True) - return super().sudo(command, *args, **kwargs) - def reconnect(self, new_port: str = '', new_user: str = '') -> 'Context': + result = super().sudo(command, *args, **kwargs) + + # Only show success/failure indicators for commands where we hid the output + if not show_output: + if result.failed: + print(f"{Fore.RED}❌ FAILED{Style.RESET_ALL}") + if result.stderr: + print(f"Error: {result.stderr.strip()}") + elif result.stdout: + print(f"Output: {result.stdout.strip()}") + else: + print(f"{Fore.GREEN}✅ SUCCESS{Style.RESET_ALL}") + + return result + + def reconnect(self, new_port: str = "", new_user: str = "") -> "Context": """ Creates and returns a new Context (Connection) object to the same host and user, but on a different port, preserving other connection details. @@ -43,8 +262,11 @@ def reconnect(self, new_port: str = '', new_user: str = '') -> 'Context': port_to_use = new_port or self.port user_to_use = new_user or self.user host_to_use = self.host - - print(f"\nAttempting to reconnect to {self.host} as user {user_to_use} on new port {port_to_use}...") + + print( + f"\nAttempting to reconnect to {self.host} as user {user_to_use} " + f"on new port {port_to_use}..." + ) connect_kwargs_to_use = {} if isinstance(self.connect_kwargs, dict): @@ -54,14 +276,14 @@ def reconnect(self, new_port: str = '', new_user: str = '') -> 'Context': # --- CRITICAL FIX FOR AmbiguousMergeError --- # inline_ssh_env should be a Boolean, not a dictionary - inline_ssh_env_to_use = getattr(self, 'inline_ssh_env', False) + inline_ssh_env_to_use = getattr(self, "inline_ssh_env", False) if not isinstance(inline_ssh_env_to_use, bool): inline_ssh_env_to_use = False # --- END CRITICAL FIX --- - connect_kwargs_to_use.pop('port', None) - connect_kwargs_to_use.pop('connect_timeout', None) - connect_kwargs_to_use.pop('forward_agent', None) + connect_kwargs_to_use.pop("port", None) + connect_kwargs_to_use.pop("connect_timeout", None) + connect_kwargs_to_use.pop("forward_agent", None) if self.is_connected: self.close() @@ -74,25 +296,42 @@ def reconnect(self, new_port: str = '', new_user: str = '') -> 'Context': port=port_to_use, gateway=gateway_to_use, connect_kwargs=connect_kwargs_to_use, - inline_ssh_env=inline_ssh_env_to_use, # Use the Boolean value + inline_ssh_env=inline_ssh_env_to_use, # Use the Boolean value ) - try: + # Try to connect with retries for SSH port changes + import time + + max_retries = 3 + retry_delay = 2 - new_ctx.open() # Explicitly open to test the connection - new_ctx.run("echo 'Successfully reconnected on new port.'", hide=True) - print(f"Successfully re-established connection on {new_ctx.host}:{new_ctx.port}") - except Exception as e: - print(f"CRITICAL ERROR: Failed to reconnect to {self.host} as user {user_to_use} on new port {port_to_use}.") - print(f"Manual intervention may be required!") - print(f"Error details: {e}") - if new_ctx and new_ctx.is_connected: - new_ctx.close() + for attempt in range(max_retries): + try: + new_ctx.open() # Explicitly open to test the connection + new_ctx.run("echo 'Successfully reconnected on new port.'", hide=True) + print(f"Successfully re-established connection on {new_ctx.host}:{new_ctx.port}") + break + except Exception as e: + if attempt < max_retries - 1: + print(f"Connection attempt {attempt + 1} failed, retrying in {retry_delay}s...") + time.sleep(retry_delay) + continue + else: + print( + f"CRITICAL ERROR: Failed to reconnect to {self.host} as user {user_to_use} " + f"on new port {port_to_use} after {max_retries} attempts." + ) + print("Manual intervention may be required!") + print(f"Error details: {e}") + if new_ctx and new_ctx.is_connected: + new_ctx.close() return new_ctx @staticmethod def wrap_context(func: Callable): + """Decorator to wrap Fabric tasks with enhanced Context functionality.""" + @wraps(func) def wrapper(c: Context, *args, **kwargs): # Also apply the same robustness for inline_ssh_env and connect_kwargs @@ -102,7 +341,7 @@ def wrapper(c: Context, *args, **kwargs): wrapper_connect_kwargs = c.connect_kwargs.copy() # inline_ssh_env should be a Boolean, not a dictionary - wrapper_inline_ssh_env = getattr(c, 'inline_ssh_env', False) + wrapper_inline_ssh_env = getattr(c, "inline_ssh_env", False) if not isinstance(wrapper_inline_ssh_env, bool): wrapper_inline_ssh_env = False @@ -111,9 +350,10 @@ def wrapper(c: Context, *args, **kwargs): host=c.host, user=c.user, port=c.port, - gateway=getattr(c, 'gateway', None), # Safely get gateway attribute + gateway=getattr(c, "gateway", None), # Safely get gateway attribute connect_kwargs=wrapper_connect_kwargs, - inline_ssh_env=wrapper_inline_ssh_env, # Boolean value + inline_ssh_env=wrapper_inline_ssh_env, # Boolean value ) return func(ctx, *args, **kwargs) - return wrapper \ No newline at end of file + + return wrapper diff --git a/cloudy/web/apache.py b/cloudy/web/apache.py index 6b746f5..99ff990 100644 --- a/cloudy/web/apache.py +++ b/cloudy/web/apache.py @@ -1,79 +1,86 @@ import os + from fabric import task -from cloudy.util.context import Context + +from cloudy.sys.core import sys_reload_service from cloudy.sys.etc import sys_etc_git_commit from cloudy.sys.ports import sys_show_next_available_port -from cloudy.sys.core import sys_reload_service +from cloudy.util.context import Context + @task @Context.wrap_context def web_apache2_install(c: Context): """Install apache2 and related modules.""" - c.sudo('apt -y install apache2') + c.sudo("apt -y install apache2") web_apache2_install_mods(c) util_apache2_bootstrap(c) - sys_etc_git_commit(c, 'Installed apache2') + sys_etc_git_commit(c, "Installed apache2") + @task @Context.wrap_context def util_apache2_bootstrap(c: Context): """Bootstrap Apache2 configuration from local templates.""" - c.sudo('rm -rf /etc/apache2/*') - cfgdir = os.path.abspath(os.path.join(os.path.dirname(__file__), '../cfg')) + c.sudo("rm -rf /etc/apache2/*") + cfgdir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../cfg")) configs = { - 'apache2/apache2.conf': '/etc/apache2/apache2.conf', - 'apache2/envvars.conf': '/etc/apache2/envvars', - 'apache2/ports.conf': '/etc/apache2/ports.conf' + "apache2/apache2.conf": "/etc/apache2/apache2.conf", + "apache2/envvars.conf": "/etc/apache2/envvars", + "apache2/ports.conf": "/etc/apache2/ports.conf", } for local, remote in configs.items(): localcfg = os.path.expanduser(os.path.join(cfgdir, local)) c.put(localcfg, remote, use_sudo=True) - c.sudo('mkdir -p /etc/apache2/sites-available /etc/apache2/sites-enabled') + c.sudo("mkdir -p /etc/apache2/sites-available /etc/apache2/sites-enabled") + @task @Context.wrap_context -def web_apache2_install_mods(c: Context, py_version='3'): +def web_apache2_install_mods(c: Context, py_version="3"): """Install apache2 related packages.""" - mod_wsgi = 'libapache2-mod-wsgi-py3' if '3' in py_version else 'libapache2-mod-wsgi' - requirements = [mod_wsgi, 'libapache2-mod-rpaf'] + mod_wsgi = "libapache2-mod-wsgi-py3" if "3" in py_version else "libapache2-mod-wsgi" + requirements = [mod_wsgi, "libapache2-mod-rpaf"] c.sudo(f'apt -y install {" ".join(requirements)}') - sys_etc_git_commit(c, 'Installed apache2 and related packages') + sys_etc_git_commit(c, "Installed apache2 and related packages") + @task @Context.wrap_context -def web_apache2_set_port(c: Context, port=''): +def web_apache2_set_port(c: Context, port=""): """Setup Apache2 to listen to a new port.""" - remotecfg = '/etc/apache2/ports.conf' + remotecfg = "/etc/apache2/ports.conf" port = sys_show_next_available_port(c, port) - c.sudo(f'echo "Listen 127.0.0.1:{port}" >> {remotecfg}') - sys_reload_service(c, 'apache2') - sys_etc_git_commit(c, f'Apache now listens on port {port}') + c.sudo(f"sh -c 'echo \"Listen 127.0.0.1:{port}\" >> {remotecfg}'") + sys_reload_service(c, "apache2") + sys_etc_git_commit(c, f"Apache now listens on port {port}") + @task @Context.wrap_context -def web_apache2_setup_domain(c: Context, port: str, domain: str = ''): +def web_apache2_setup_domain(c: Context, port: str, domain: str = ""): """Setup Apache2 config file for a domain.""" - apache_avail_dir = '/etc/apache2/sites-available' - cfgdir = os.path.abspath(os.path.join(os.path.dirname(__file__), '../cfg')) - localcfg = os.path.expanduser(os.path.join(cfgdir, 'apache2/site.conf')) - remotecfg = f'{apache_avail_dir}/{domain}' + apache_avail_dir = "/etc/apache2/sites-available" + cfgdir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../cfg")) + localcfg = os.path.expanduser(os.path.join(cfgdir, "apache2/site.conf")) + remotecfg = f"{apache_avail_dir}/{domain}" - c.sudo(f'rm -rf {remotecfg}') + c.sudo(f"rm -rf {remotecfg}") c.put(localcfg, remotecfg, use_sudo=True) # Escape domain for sed replacement - escaped_domain = domain.replace('.', r'\.') + escaped_domain = domain.replace(".", r"\.") c.sudo(f'sed -i "s/port_num/{port}/g" {remotecfg}') c.sudo(f'sed -i "s/example\\.com/{escaped_domain}/g" {remotecfg}') - c.sudo(f'chown -R root:root {apache_avail_dir}') - c.sudo(f'chmod -R 755 {apache_avail_dir}') - c.sudo(f'a2ensite {domain}') - + c.sudo(f"chown -R root:root {apache_avail_dir}") + c.sudo(f"chmod -R 755 {apache_avail_dir}") + c.sudo(f"a2ensite {domain}") + web_apache2_set_port(c, port) - sys_reload_service(c, 'apache2') - sys_etc_git_commit(c, f'Setup Apache Config for Domain {domain}') + sys_reload_service(c, "apache2") + sys_etc_git_commit(c, f"Setup Apache Config for Domain {domain}") diff --git a/cloudy/web/geoip.py b/cloudy/web/geoip.py index 0a3dc2f..b9fe442 100644 --- a/cloudy/web/geoip.py +++ b/cloudy/web/geoip.py @@ -1,65 +1,71 @@ -import os from fabric import task -from cloudy.util.context import Context + from cloudy.sys.etc import sys_etc_git_commit +from cloudy.util.context import Context + @task @Context.wrap_context def web_geoip_install_requirements(c: Context): """Install GeoIP build requirements.""" requirements = [ - 'zlibc', - 'zlib1g-dev', - 'libssl-dev', - 'build-essential', - 'libtool', + "zlibc", + "zlib1g-dev", + "libssl-dev", + "build-essential", + "libtool", ] c.sudo(f'apt -y install {" ".join(requirements)}') - sys_etc_git_commit(c, 'Installed GeoIP requirements') + sys_etc_git_commit(c, "Installed GeoIP requirements") + @task @Context.wrap_context def web_geoip_install_maxmind_api(c: Context): """Install Maxmind C API.""" - tmp_dir = '/tmp/maxmind' - geoip_url = 'http://www.maxmind.com/download/geoip/api/c/GeoIP.tar.gz' - c.sudo(f'rm -rf {tmp_dir} && mkdir -p {tmp_dir}') + tmp_dir = "/tmp/maxmind" + geoip_url = "http://www.maxmind.com/download/geoip/api/c/GeoIP.tar.gz" + c.sudo(f"rm -rf {tmp_dir} && mkdir -p {tmp_dir}") with c.cd(tmp_dir): - c.sudo(f'wget {geoip_url}') - c.sudo('tar xvf GeoIP.tar.gz') + c.sudo(f"wget {geoip_url}") + c.sudo("tar xvf GeoIP.tar.gz") # The extracted folder may vary, so use a wildcard. - with c.cd('GeoIP-*'): - c.sudo('./configure') - c.sudo('make') - c.sudo('make install') - sys_etc_git_commit(c, 'Installed Maxmind C API') + with c.cd("GeoIP-*"): + c.sudo("./configure") + c.sudo("make") + c.sudo("make install") + sys_etc_git_commit(c, "Installed Maxmind C API") + @task @Context.wrap_context -def web_geoip_install_maxmind_country(c: Context, dest_dir='/srv/www/shared/geoip'): +def web_geoip_install_maxmind_country(c: Context, dest_dir="/srv/www/shared/geoip"): """Install Maxmind Country Lite database.""" - tmp_dir = '/tmp/maxmind' - geo_country_url = 'http://geolite.maxmind.com/download/geoip/database/GeoLiteCountry/GeoIP.dat.gz' - c.sudo(f'mkdir -p {tmp_dir}') + tmp_dir = "/tmp/maxmind" + geo_country_url = ( + "http://geolite.maxmind.com/download/geoip/database/GeoLiteCountry/GeoIP.dat.gz" + ) + c.sudo(f"mkdir -p {tmp_dir}") with c.cd(tmp_dir): - c.sudo(f'wget -N -q {geo_country_url}') - c.sudo('gunzip -c GeoIP.dat.gz > GeoIP.dat') - c.sudo(f'mkdir -p {dest_dir}') - c.sudo(f'chown -R :www-data {dest_dir}') - c.sudo(f'mv -f *.dat {dest_dir}') - c.sudo(f'chmod -R g+wrx {dest_dir}') + c.sudo(f"wget -N -q {geo_country_url}") + c.sudo("gunzip GeoIP.dat.gz") + c.sudo(f"mkdir -p {dest_dir}") + c.sudo(f"chown -R :www-data {dest_dir}") + c.sudo(f"mv -f *.dat {dest_dir}") + c.sudo(f"chmod -R g+wrx {dest_dir}") + @task @Context.wrap_context -def web_geoip_install_maxmind_city(c: Context, dest_dir='/srv/www/shared/geoip'): +def web_geoip_install_maxmind_city(c: Context, dest_dir="/srv/www/shared/geoip"): """Install Maxmind City Lite database.""" - tmp_dir = '/tmp/maxmind' - geo_city_url = 'http://geolite.maxmind.com/download/geoip/database/GeoLiteCity.dat.gz' - c.sudo(f'mkdir -p {tmp_dir}') + tmp_dir = "/tmp/maxmind" + geo_city_url = "http://geolite.maxmind.com/download/geoip/database/GeoLiteCity.dat.gz" + c.sudo(f"mkdir -p {tmp_dir}") with c.cd(tmp_dir): - c.sudo(f'wget -N -q {geo_city_url}') - c.sudo('gunzip -c GeoLiteCity.dat.gz > GeoLiteCity.dat') - c.sudo(f'mkdir -p {dest_dir}') - c.sudo(f'chown -R :www-data {dest_dir}') - c.sudo(f'mv -f *.dat {dest_dir}') - c.sudo(f'chmod -R g+wrx {dest_dir}') + c.sudo(f"wget -N -q {geo_city_url}") + c.sudo("gunzip GeoLiteCity.dat.gz") + c.sudo(f"mkdir -p {dest_dir}") + c.sudo(f"chown -R :www-data {dest_dir}") + c.sudo(f"mv -f *.dat {dest_dir}") + c.sudo(f"chmod -R g+wrx {dest_dir}") diff --git a/cloudy/web/nginx.py b/cloudy/web/nginx.py index f650287..2dfb537 100644 --- a/cloudy/web/nginx.py +++ b/cloudy/web/nginx.py @@ -1,77 +1,85 @@ import os + from fabric import task -from cloudy.util.context import Context -from cloudy.sys.etc import sys_etc_git_commit + from cloudy.sys.core import sys_restart_service +from cloudy.sys.etc import sys_etc_git_commit +from cloudy.util.context import Context + @task @Context.wrap_context def web_nginx_install(c: Context): """Install Nginx and bootstrap configuration.""" - c.sudo('apt -y install nginx') + c.sudo("apt -y install nginx") web_nginx_bootstrap(c) - sys_restart_service(c, 'nginx') - sys_etc_git_commit(c, 'Installed Nginx') + sys_restart_service(c, "nginx") + sys_etc_git_commit(c, "Installed Nginx") + @task @Context.wrap_context def web_nginx_bootstrap(c: Context): """Bootstrap Nginx configuration from local templates.""" - c.sudo('rm -rf /etc/nginx/*') - cfgdir = os.path.join(os.path.dirname(__file__), '../cfg') + c.sudo("rm -rf /etc/nginx/*") + cfgdir = os.path.join(os.path.dirname(__file__), "../cfg") configs = { - 'nginx/nginx.conf': '/etc/nginx/nginx.conf', - 'nginx/mime.types.conf': '/etc/nginx/mime.types' + "nginx/nginx.conf": "/etc/nginx/nginx.conf", + "nginx/mime.types.conf": "/etc/nginx/mime.types", } for local, remote in configs.items(): localcfg = os.path.expanduser(os.path.join(cfgdir, local)) - c.put(localcfg, remote, use_sudo=True) + # Put to temp location, then move with sudo + temp_path = f"/tmp/{os.path.basename(remote)}" + c.put(localcfg, temp_path) + c.sudo(f"mv {temp_path} {remote}") + c.sudo(f"chown root:root {remote}") + c.sudo(f"chmod 644 {remote}") + + c.sudo("mkdir -p /etc/nginx/sites-available") + c.sudo("mkdir -p /etc/nginx/sites-enabled") - c.sudo('mkdir -p /etc/nginx/sites-available') - c.sudo('mkdir -p /etc/nginx/sites-enabled') @task @Context.wrap_context -def web_nginx_copy_ssl(c: Context, domain: str, crt_dir: str = '~/.ssh/certificates/'): +def web_nginx_copy_ssl(c: Context, domain: str, crt_dir: str = "~/.ssh/certificates/"): """Move SSL certificate and key to the server.""" - c.sudo('mkdir -p /etc/ssl/nginx/crt/') - c.sudo('mkdir -p /etc/ssl/nginx/key/') - c.sudo('chmod -R 755 /etc/ssl/nginx/') + c.sudo("mkdir -p /etc/ssl/nginx/crt/") + c.sudo("mkdir -p /etc/ssl/nginx/key/") + c.sudo("chmod -R 755 /etc/ssl/nginx/") crt_dir = os.path.expanduser(crt_dir) if not os.path.exists(crt_dir): - print(f'⚠️ Local certificate dir not found: {crt_dir}') + print(f"⚠️ Local certificate dir not found: {crt_dir}") return - localcrt = os.path.join(crt_dir, f'{domain}.combo.crt') - remotecrt = f'/etc/ssl/nginx/crt/{domain}.combo.crt' + localcrt = os.path.join(crt_dir, f"{domain}.combo.crt") + remotecrt = f"/etc/ssl/nginx/crt/{domain}.combo.crt" c.put(localcrt, remotecrt, use_sudo=True) - localkey = os.path.join(crt_dir, f'{domain}.key') - remotekey = f'/etc/ssl/nginx/key/{domain}.key' + localkey = os.path.join(crt_dir, f"{domain}.key") + remotekey = f"/etc/ssl/nginx/key/{domain}.key" c.put(localkey, remotekey, use_sudo=True) + @task @Context.wrap_context def web_nginx_setup_domain( c: Context, domain: str, - proto: str = 'http', - interface: str = '*', - upstream_address: str = '', - upstream_port: str = '' + proto: str = "http", + interface: str = "*", + upstream_address: str = "", + upstream_port: str = "", ): """Setup Nginx config file for a domain.""" - if 'https' in proto or 'ssl' in proto: - proto = 'https' - ssl_crt = f'/etc/ssl/nginx/crt/{domain}.combo.crt' - ssl_key = f'/etc/ssl/nginx/key/{domain}.key' - if not c.sudo(f'test -f {ssl_crt}', warn=True).ok or not c.sudo(f'test -f {ssl_key}', warn=True).ok: - print(f'⚠️ SSL certificate and key not found.\n{ssl_crt}\n{ssl_key}') - - cfgdir = os.path.join(os.path.dirname(__file__), '../cfg') - nginx_avail_dir = '/etc/nginx/sites-available' - nginx_enabled_dir = '/etc/nginx/sites-enabled' - - localcfg = os.path.expanduser(os.path.join(cfgdir, f'nginx/{proto}.conf')) + if "https" in proto or "ssl" in proto: + proto = "https" + ssl_crt = f"/etc/ssl/nginx/crt/{domain}.combo.crt" + ssl_key = f"/etc/ssl/nginx/key/{domain}.key" + if ( + not c.sudo(f"test -f {ssl_crt}", warn=True).ok + or not c.sudo(f"test -f {ssl_key}", warn=True).ok + ): + print(f"⚠️ SSL certificate and key not found.\n{ssl_crt}\n{ssl_key}") diff --git a/cloudy/web/supervisor.py b/cloudy/web/supervisor.py index b3cbb0c..fafb55b 100644 --- a/cloudy/web/supervisor.py +++ b/cloudy/web/supervisor.py @@ -1,46 +1,51 @@ import os + from fabric import task -from cloudy.util.context import Context + +from cloudy.sys.core import sys_restart_service from cloudy.sys.etc import sys_etc_git_commit from cloudy.sys.ports import sys_show_next_available_port -from cloudy.sys.core import sys_restart_service, sys_add_default_startup +from cloudy.util.context import Context + @task @Context.wrap_context def web_supervisor_install(c: Context): """Install Supervisor and bootstrap configuration.""" - c.sudo('apt -y install supervisor') + c.sudo("apt -y install supervisor") web_supervisor_bootstrap(c) - sys_etc_git_commit(c, 'Installed Supervisor') + sys_etc_git_commit(c, "Installed Supervisor") + @task @Context.wrap_context def web_supervisor_bootstrap(c: Context): """Bootstrap Supervisor configuration from local templates.""" - c.sudo('rm -rf /etc/supervisor/*') - cfgdir = os.path.join(os.path.dirname( __file__), '../cfg') - localcfg = os.path.expanduser(os.path.join(cfgdir, 'supervisor/supervisord.conf')) - remotecfg = '/etc/supervisor/supervisord.conf' + c.sudo("rm -rf /etc/supervisor/*") + cfgdir = os.path.join(os.path.dirname(__file__), "../cfg") + localcfg = os.path.expanduser(os.path.join(cfgdir, "supervisor/supervisord.conf")) + remotecfg = "/etc/supervisor/supervisord.conf" c.put(localcfg, remotecfg, use_sudo=True) - c.sudo('mkdir -p /etc/supervisor/sites-available') - c.sudo('mkdir -p /etc/supervisor/sites-enabled') - c.sudo('chown -R root:root /etc/supervisor') - c.sudo('chmod -R 644 /etc/supervisor') - c.sys_add_default_startup('supervisor') - sys_restart_service(c, 'supervisor') + c.sudo("mkdir -p /etc/supervisor/sites-available") + c.sudo("mkdir -p /etc/supervisor/sites-enabled") + c.sudo("chown -R root:root /etc/supervisor") + c.sudo("chmod -R 644 /etc/supervisor") + c.sys_add_default_startup("supervisor") + sys_restart_service(c, "supervisor") + @task @Context.wrap_context -def web_supervisor_setup_domain(c: Context, domain, port=None, interface='0.0.0.0', worker_num=3): +def web_supervisor_setup_domain(c: Context, domain, port=None, interface="0.0.0.0", worker_num=3): """Setup Supervisor config file for a domain.""" - supervisor_avail_dir = '/etc/supervisor/sites-available' - supervisor_enabled_dir = '/etc/supervisor/sites-enabled' + supervisor_avail_dir = "/etc/supervisor/sites-available" + supervisor_enabled_dir = "/etc/supervisor/sites-enabled" - cfgdir = os.path.join(os.path.dirname( __file__), '../cfg') - localcfg = os.path.expanduser(os.path.join(cfgdir, 'supervisor/site.conf')) - remotecfg = f'{supervisor_avail_dir}/{domain}.conf' - c.sudo(f'rm -rf {remotecfg}') + cfgdir = os.path.join(os.path.dirname(__file__), "../cfg") + localcfg = os.path.expanduser(os.path.join(cfgdir, "supervisor/site.conf")) + remotecfg = f"{supervisor_avail_dir}/{domain}.conf" + c.sudo(f"rm -rf {remotecfg}") c.put(localcfg, remotecfg, use_sudo=True) if not port: port = sys_show_next_available_port(c) @@ -49,13 +54,10 @@ def web_supervisor_setup_domain(c: Context, domain, port=None, interface='0.0.0. c.sudo(f'sed -i "s/worker_num/{worker_num}/g" {remotecfg}') escaped_domain = domain.replace(".", "\\.") c.sudo(f'sed -i "s/example\\.com/{escaped_domain}/g" {remotecfg}') - c.sudo(f'chown -R root:root {supervisor_avail_dir}') - c.sudo(f'chmod -R 755 {supervisor_avail_dir}') + c.sudo(f"chown -R root:root {supervisor_avail_dir}") + c.sudo(f"chmod -R 755 {supervisor_avail_dir}") with c.cd(supervisor_enabled_dir): - c.sudo(f'ln -sf {remotecfg}') - sys_restart_service(c, 'supervisor') - c.sudo(f'supervisorctl restart {domain}') - sys_etc_git_commit(c, f'Setup Supervisor Config for Domain {domain}') - - - + c.sudo(f"ln -sf {remotecfg}") + sys_restart_service(c, "supervisor") + c.sudo(f"supervisorctl restart {domain}") + sys_etc_git_commit(c, f"Setup Supervisor Config for Domain {domain}") diff --git a/cloudy/web/www.py b/cloudy/web/www.py index 2d732f2..d3bb690 100644 --- a/cloudy/web/www.py +++ b/cloudy/web/www.py @@ -1,93 +1,95 @@ from fabric import task -from cloudy.util.context import Context + from cloudy.sys.core import sys_reload_service +from cloudy.util.context import Context + @task @Context.wrap_context -def web_create_data_directory(c: Context, web_dir='/srv/www'): +def web_create_data_directory(c: Context, web_dir="/srv/www"): """Create a data directory for the web files.""" - c.sudo(f'mkdir -p {web_dir}') + c.sudo(f"mkdir -p {web_dir}") @task @Context.wrap_context -def web_create_shared_directory(c: Context, shared_dir='/srv/www/shared'): +def web_create_shared_directory(c: Context, shared_dir="/srv/www/shared"): """Create a shared directory for the site.""" - c.sudo(f'mkdir -p {shared_dir}') - c.sudo(f'chown -R :www-data {shared_dir}') - c.sudo(f'chmod -R g+wrx {shared_dir}') + c.sudo(f"mkdir -p {shared_dir}") + c.sudo(f"chown -R :www-data {shared_dir}") + c.sudo(f"chmod -R g+wrx {shared_dir}") + @task @Context.wrap_context -def web_create_seekrets_directory(c: Context, seekrets_dir='/srv/www/seekrets'): +def web_create_seekrets_directory(c: Context, seekrets_dir="/srv/www/seekrets"): """Create a seekrets directory.""" - c.sudo(f'mkdir -p {seekrets_dir}') - c.sudo(f'chown -R :www-data {seekrets_dir}') - c.sudo(f'chmod -R g+wrx {seekrets_dir}') + c.sudo(f"mkdir -p {seekrets_dir}") + c.sudo(f"chown -R :www-data {seekrets_dir}") + c.sudo(f"chmod -R g+wrx {seekrets_dir}") + @task @Context.wrap_context def web_create_site_directory(c: Context, domain): """Create a site directory structure for a domain.""" - path = f'/srv/www/{domain}' - c.sudo(f'mkdir -p {path}/{{pri,pub,log,bck}}') - c.sudo(f'chown -R :www-data {path}') - c.sudo(f'chmod -R g+w {path}/pub') - c.sudo(f'chmod -R g+w {path}/log') + path = f"/srv/www/{domain}" + c.sudo(f"mkdir -p {path}/{{pri,pub,log,bck}}") + c.sudo(f"chown -R :www-data {path}") + c.sudo(f"chmod -R g+w {path}/pub") + c.sudo(f"chmod -R g+w {path}/log") + @task @Context.wrap_context -def web_create_virtual_env(c: Context, domain, py_version='3'): +def web_create_virtual_env(c: Context, domain, py_version="3"): """Create a virtualenv for a domain.""" - path = f'/srv/www/{domain}/pri' + path = f"/srv/www/{domain}/pri" with c.cd(path): - c.sudo(f'python{py_version} -m venv venv') - c.sudo('chown -R :www-data venv') - c.sudo('chmod -R g+wrx venv') + c.sudo(f"python{py_version} -m venv venv") + c.sudo("chown -R :www-data venv") + c.sudo("chmod -R g+wrx venv") + @task @Context.wrap_context def web_create_site_log_file(c: Context, domain): """Create a log file with proper permissions for Django.""" - site_logfile = f'/srv/www/{domain}/log/{domain}.log' - c.sudo(f'touch {site_logfile}') - c.sudo(f'chown :www-data {site_logfile}') - c.sudo(f'chmod g+rw {site_logfile}') + site_logfile = f"/srv/www/{domain}/log/{domain}.log" + c.sudo(f"touch {site_logfile}") + c.sudo(f"chown :www-data {site_logfile}") + c.sudo(f"chmod g+rw {site_logfile}") + @task @Context.wrap_context -def web_prepare_site(c: Context, domain, py_version='3'): +def web_prepare_site(c: Context, domain, py_version="3"): """Create a site directory and everything else for the site on production server.""" web_create_site_directory(c, domain) web_create_virtual_env(c, domain, py_version) web_create_site_log_file(c, domain) + @task @Context.wrap_context def web_deploy(c: Context, domain): """Push changes to a production server.""" - webroot = f'/srv/www/{domain}/pri/venv/webroot' + webroot = f"/srv/www/{domain}/pri/venv/webroot" with c.cd(webroot): - with c.prefix(f'source {webroot}/../bin/activate'): - c.run('git pull') - c.run('pip install -r env/deploy_reqs.txt') - c.run('bin/manage.py collectstatic --noinput') - c.run('bin/manage.py migrate') - sys_reload_service(c, 'nginx') - c.sudo(f'supervisorctl restart {domain}') + with c.prefix(f"source {webroot}/../bin/activate"): + c.run("git pull") + c.run("pip install -r env/deploy_reqs.txt") + c.run("bin/manage.py collectstatic --noinput") + c.run("bin/manage.py migrate") + sys_reload_service(c, "nginx") + c.sudo(f"supervisorctl restart {domain}") + @task @Context.wrap_context def web_run_command(c: Context, domain, command): """Run a command from the webroot directory of a domain on a production server.""" - webroot = f'/srv/www/{domain}/pri/venv/webroot' + webroot = f"/srv/www/{domain}/pri/venv/webroot" with c.cd(webroot): - with c.prefix(f'source {webroot}/../bin/activate'): + with c.prefix(f"source {webroot}/../bin/activate"): c.run(command) - - - - - - - diff --git a/dev-requirements.txt b/dev-requirements.txt deleted file mode 100644 index 81c6ad4..0000000 --- a/dev-requirements.txt +++ /dev/null @@ -1,5 +0,0 @@ -setuptools>=80.9.0 -colorama>=0.4.6 -fabric>=3.2.2 -apache-libcloud>=3.8.0 -s3cmd>=2.4.0 \ No newline at end of file diff --git a/fabfile.py b/fabfile.py index b6c0002..2ed6c4c 100644 --- a/fabfile.py +++ b/fabfile.py @@ -1,55 +1,407 @@ import logging +import sys as system from fabric import task from invoke.collection import Collection +from invoke import Context as InvokeContext +from paramiko.ssh_exception import AuthenticationException, SSHException + from cloudy.sys import ( - core, docker, etc, python, firewall, memcached, mount, openvpn, ports, - postfix, python, redis, security, ssh, swap, timezone, user, vim + core, + docker, + etc, + firewall, + memcached, + mount, + openvpn, + ports, + postfix, + python, + redis, + security, + ssh, + swap, + timezone, + user, + vim, ) -from cloudy.db import psql, pgis, mysql, pgpool, pgbouncer +from cloudy.db import mysql, pgbouncer, pgis, pgpool, psql +from cloudy.web import apache, geoip, nginx, supervisor, www +from cloudy.aws import ec2 from cloudy.srv import ( recipe_cache_redis, + recipe_database_psql_gis, recipe_generic_server, + recipe_loadbalancer_nginx, + recipe_standalone_server, + recipe_vpn_server, recipe_webserver_django, - recipe_database_psql_gis, - recipe_loadbalancer_nginx, - recipe_standalone_server, - recipe_vpn_server ) + logging.getLogger().setLevel(logging.ERROR) -# Automatically register all tasks in this file -ns = Collection.from_module(__import__(__name__)) - -ns.add_collection(Collection.from_module(core), name='core') -ns.add_collection(Collection.from_module(docker), name='docker') -ns.add_collection(Collection.from_module(etc), name='etc') -ns.add_collection(Collection.from_module(python), name='python') -ns.add_collection(Collection.from_module(firewall), name='firewall') -ns.add_collection(Collection.from_module(memcached), name='memcached') -ns.add_collection(Collection.from_module(mount), name='mount') -ns.add_collection(Collection.from_module(openvpn), name='openvpn') -ns.add_collection(Collection.from_module(ports), name='ports') -ns.add_collection(Collection.from_module(postfix), name='postfix') -ns.add_collection(Collection.from_module(python), name='python') -ns.add_collection(Collection.from_module(redis), name='redis') -ns.add_collection(Collection.from_module(security), name='security') -ns.add_collection(Collection.from_module(ssh), name='ssh') -ns.add_collection(Collection.from_module(swap), name='swap') -ns.add_collection(Collection.from_module(timezone), name='timezone') -ns.add_collection(Collection.from_module(user), name='user') -ns.add_collection(Collection.from_module(vim), name='vim') -ns.add_collection(Collection.from_module(psql), name='psql') -ns.add_collection(Collection.from_module(pgis), name='pgis') -ns.add_collection(Collection.from_module(mysql), name='mysql') -ns.add_collection(Collection.from_module(pgpool), name='pgpool') -ns.add_collection(Collection.from_module(pgbouncer), name='pgbouncer') - - -ns.add_collection(Collection.from_module(recipe_cache_redis), name='recipe_cache_redis') -ns.add_collection(Collection.from_module(recipe_generic_server), name='recipe_generic_server') -ns.add_collection(Collection.from_module(recipe_webserver_django), name='recipe_webserver_django') -ns.add_collection(Collection.from_module(recipe_database_psql_gis), name='recipe_database_psql_gis') -ns.add_collection(Collection.from_module(recipe_loadbalancer_nginx), name='recipe_loadbalancer_nginx') -ns.add_collection(Collection.from_module(recipe_standalone_server), name='recipe_standalone_server') -ns.add_collection(Collection.from_module(recipe_vpn_server), name='recipe_vpn_server') +# Add global configuration for verbose and debug modes +def configure_context(c: InvokeContext): + """Configure context with verbose/debug flags from command line.""" + # These will be set by command-line flags like --verbose or --debug + if hasattr(c.config, 'run') and hasattr(c.config.run, 'verbose'): + c.config.cloudy_verbose = c.config.run.verbose + if hasattr(c.config, 'run') and hasattr(c.config.run, 'debug'): + c.config.cloudy_debug = c.config.run.debug + + +@task +def help(c): + """📖 Python Cloudy - Infrastructure automation toolkit + + 🚀 RECIPE COMMANDS (High-level server deployment) + ├── recipe.gen-install - Complete server setup with users, security, etc. + ├── recipe.redis-install - Redis cache server setup + ├── recipe.psql-install - PostGIS-enabled database setup + ├── recipe.web-install - Django web server setup + ├── recipe.lb-install - Nginx load balancer setup + ├── recipe.vpn-install - VPN server setup + └── recipe.sta-install - Standalone server setup + + 🎛️ GLOBAL FLAGS (for any command) + ├── --debug, -d - Enable Fabric debug mode + all output + ├── --echo, -e - Echo commands before running + └── CLOUDY_VERBOSE=1 - Environment variable for verbose output + + 🔧 SYSTEM COMMANDS + ├── sys.init - Initialize and update system + ├── sys.hostname - Set system hostname + ├── sys.users - User management (add, delete, password) + ├── sys.ssh - SSH configuration and security + ├── sys.services - Service management (start, stop, restart) + + 🗄️ DATABASE COMMANDS + ├── db.pg.* - PostgreSQL (17 commands) + ├── db.my.* - MySQL (6 commands) + ├── db.pgb.* - PgBouncer (3 commands) + ├── db.pgp.* - PgPool (2 commands) + └── db.gis.* - PostGIS (3 commands) + + 🌐 WEB SERVER COMMANDS + ├── web.apache.* - Apache configuration + ├── web.nginx.* - Nginx configuration + ├── web.supervisor.* - Process management + └── web.ssl.* - SSL certificate management + + 🔒 SECURITY & FIREWALL + ├── fw.* - Firewall configuration (17 commands) + ├── security.* - Security hardening + + ☁️ CLOUD COMMANDS + └── aws.* - EC2 instance management (17 commands) + + 📋 EXAMPLES: + + fab recipe.gen-install --cfg-file="./.cloudy.production" + fab db.pg.create-user --username=myuser --password=mypass + fab sys.hostname --hostname=myserver.com + fab fw.allow-http + + Use 'fab -l' to see all available commands. + """ + print(help.__doc__) + + +# Create clean command structure +ns = Collection() +ns.add_task(help) + +# RECIPE COMMANDS - High-level deployment recipes +recipe = Collection("recipe") +recipe.add_task(recipe_generic_server.setup_server, name="gen-install") +recipe.add_task(recipe_cache_redis.setup_redis, name="redis-install") +recipe.add_task(recipe_database_psql_gis.setup_db, name="psql-install") +recipe.add_task(recipe_webserver_django.setup_web, name="web-install") +recipe.add_task(recipe_loadbalancer_nginx.setup_lb, name="lb-install") +recipe.add_task(recipe_vpn_server.setup_openvpn, name="vpn-install") +recipe.add_task(recipe_standalone_server.setup_standalone, name="sta-install") +ns.add_collection(recipe) + +# SYSTEM COMMANDS - All core system functionality +sys = Collection("sys") + +# Core system functions +sys.add_task(core.sys_init, name="init") +sys.add_task(core.sys_update, name="update") +sys.add_task(core.sys_upgrade, name="upgrade") +sys.add_task(core.sys_safe_upgrade, name="safe-upgrade") +sys.add_task(core.sys_hostname_configure, name="hostname") +sys.add_task(core.sys_uname, name="uname") +sys.add_task(core.sys_show_process_by_memory_usage, name="memory-usage") +sys.add_task(core.sys_start_service, name="start-service") +sys.add_task(core.sys_stop_service, name="stop-service") +sys.add_task(core.sys_restart_service, name="restart-service") +sys.add_task(core.sys_reload_service, name="reload-service") +sys.add_task(core.sys_git_install, name="install-git") +sys.add_task(core.sys_install_common, name="install-common") +sys.add_task(core.sys_git_configure, name="configure-git") +sys.add_task(core.sys_add_hosts, name="add-hosts") +sys.add_task(core.sys_locale_configure, name="configure-locale") +sys.add_task(core.sys_mkdir, name="mkdir") +sys.add_task(core.sys_shutdown, name="shutdown") + +# User management +sys.add_task(user.sys_user_add, name="add-user") +sys.add_task(user.sys_user_delete, name="delete-user") +sys.add_task(user.sys_user_change_password, name="change-password") +sys.add_task(user.sys_user_add_sudoer, name="add-sudoer") +sys.add_task(user.sys_user_add_passwordless_sudoer, name="add-passwordless-sudoer") +sys.add_task(user.sys_user_remove_sudoer, name="remove-sudoer") + +# SSH configuration +sys.add_task(ssh.sys_ssh_set_port, name="ssh-port") +sys.add_task(ssh.sys_ssh_disable_root_login, name="ssh-disable-root") +sys.add_task(ssh.sys_ssh_enable_password_authentication, name="ssh-enable-password") +sys.add_task(ssh.sys_ssh_push_public_key, name="ssh-push-key") + +# Time and locale +sys.add_task(timezone.sys_configure_timezone, name="timezone") + +# Other system utilities +sys.add_task(swap.sys_swap_configure, name="configure-swap") +sys.add_task(python.sys_python_install_common, name="install-python") +sys.add_task(vim.sys_set_default_editor, name="set-editor") +sys.add_task(postfix.sys_install_postfix, name="install-postfix") +sys.add_task(ports.sys_show_next_available_port, name="next-port") + +# Git and etc management +sys.add_task(etc.sys_etc_git_init, name="git-init-etc") +sys.add_task(etc.sys_etc_git_commit, name="git-commit-etc") + +ns.add_collection(sys) + +# DATABASE COMMANDS - All database functionality +db = Collection("db") + +# PostgreSQL commands → db.pg.* +pg = Collection("pg") +pg.add_task(psql.db_psql_install, name="install") +pg.add_task(psql.db_psql_client_install, name="client-install") +pg.add_task(psql.db_psql_configure, name="configure") +pg.add_task(psql.db_psql_create_cluster, name="create-cluster") +pg.add_task(psql.db_psql_remove_cluster, name="remove-cluster") +pg.add_task(psql.db_psql_create_user, name="create-user") +pg.add_task(psql.db_psql_delete_user, name="delete-user") +pg.add_task(psql.db_psql_user_password, name="set-user-pass") +pg.add_task(psql.db_psql_create_database, name="create-db") +pg.add_task(psql.db_psql_delete_database, name="delete-db") +pg.add_task(psql.db_psql_list_users, name="list-users") +pg.add_task(psql.db_psql_list_databases, name="list-dbs") +pg.add_task(psql.db_psql_dump_database, name="dump") +pg.add_task(psql.db_psql_grant_database_privileges, name="grant-privs") +pg.add_task(psql.db_psql_create_gis_database, name="create-gis-db") +pg.add_task(psql.db_psql_latest_version, name="latest-version") +pg.add_task(psql.db_psql_default_installed_version, name="installed-version") +db.add_collection(pg) + +# MySQL commands → db.my.* +my = Collection("my") +my.add_task(mysql.db_mysql_server_install, name="install") +my.add_task(mysql.db_mysql_client_install, name="client-install") +my.add_task(mysql.db_mysql_set_root_password, name="set-root-pass") +my.add_task(mysql.db_mysql_create_database, name="create-db") +my.add_task(mysql.db_mysql_create_user, name="create-user") +my.add_task(mysql.db_mysql_grant_user, name="grant-user") +my.add_task(mysql.db_mysql_latest_version, name="latest-version") +db.add_collection(my) + +# PgBouncer commands → db.pgb.* +pgb = Collection("pgb") +pgb.add_task(pgbouncer.db_pgbouncer_install, name="install") +pgb.add_task(pgbouncer.db_pgbouncer_configure, name="configure") +pgb.add_task(pgbouncer.db_pgbouncer_set_user_password, name="set-user-pass") +db.add_collection(pgb) + +# PgPool commands → db.pgp.* +pgp = Collection("pgp") +pgp.add_task(pgpool.db_pgpool2_install, name="install") +pgp.add_task(pgpool.db_pgpool2_configure, name="configure") +db.add_collection(pgp) + +# PostGIS commands → db.gis.* +gis = Collection("gis") +gis.add_task(pgis.db_pgis_install, name="install") +gis.add_task(pgis.db_pgis_configure, name="configure") +gis.add_task(pgis.db_pgis_get_database_gis_info, name="info") +gis.add_task(pgis.db_pgis_get_latest_version, name="latest-version") +db.add_collection(gis) + +ns.add_collection(db) + +# WEB SERVER COMMANDS - All web server functionality +web = Collection("web") + +# Apache commands → web.apache.* +apache_collection = Collection("apache") +apache_collection.add_task(apache.web_apache2_install, name="install") +apache_collection.add_task(apache.web_apache2_setup_domain, name="configure-domain") +apache_collection.add_task(apache.web_apache2_set_port, name="configure-port") +web.add_collection(apache_collection) + +# Nginx commands → web.nginx.* +nginx_collection = Collection("nginx") +nginx_collection.add_task(nginx.web_nginx_install, name="install") +nginx_collection.add_task(nginx.web_nginx_setup_domain, name="setup-domain") +nginx_collection.add_task(nginx.web_nginx_copy_ssl, name="copy-ssl") +web.add_collection(nginx_collection) + +# Supervisor commands → web.supervisor.* +supervisor_collection = Collection("supervisor") +supervisor_collection.add_task(supervisor.web_supervisor_install, name="install") +supervisor_collection.add_task(supervisor.web_supervisor_setup_domain, name="setup-domain") +web.add_collection(supervisor_collection) + +# WWW/Site commands → web.site.* +site = Collection("site") +site.add_task(www.web_create_data_directory, name="create-data-dir") +site.add_task(www.web_create_shared_directory, name="create-shared-dir") +site.add_task(www.web_create_site_directory, name="create-site-dir") +site.add_task(www.web_create_virtual_env, name="create-venv") +site.add_task(www.web_prepare_site, name="prepare-site") +web.add_collection(site) + +# GeoIP commands → web.geoip.* +geoip_collection = Collection("geoip") +geoip_collection.add_task(geoip.web_geoip_install_requirements, name="install-requirements") +geoip_collection.add_task(geoip.web_geoip_install_maxmind_api, name="install-api") +geoip_collection.add_task(geoip.web_geoip_install_maxmind_country, name="install-country") +geoip_collection.add_task(geoip.web_geoip_install_maxmind_city, name="install-city") +web.add_collection(geoip_collection) + +ns.add_collection(web) + +# FIREWALL COMMANDS - All firewall functionality +fw = Collection("fw") +fw.add_task(firewall.fw_install, name="install") +fw.add_task(firewall.fw_secure_server, name="secure-server") +fw.add_task(firewall.fw_allow_incoming_port, name="allow-port") +fw.add_task(firewall.fw_allow_incoming_http, name="allow-http") +fw.add_task(firewall.fw_allow_incoming_https, name="allow-https") +fw.add_task(firewall.fw_allow_incoming_postgresql, name="allow-postgresql") +fw.add_task(firewall.fw_allow_incoming_port_proto, name="allow-port-proto") +fw.add_task(firewall.fw_allow_incoming_host_port, name="allow-host-port") +fw.add_task(firewall.fw_disable, name="disable") +fw.add_task(firewall.fw_wide_open, name="wide-open") +fw.add_task(firewall.fw_reload_ufw, name="reload") +ns.add_collection(fw) + +# SECURITY COMMANDS +security_collection = Collection("security") +security_collection.add_task(security.sys_security_install_common, name="install-common") +ns.add_collection(security_collection) + +# SERVICES COMMANDS +services = Collection("services") + +# Docker +docker_collection = Collection("docker") +docker_collection.add_task(docker.sys_docker_install, name="install") +docker_collection.add_task(docker.sys_docker_config, name="configure") +docker_collection.add_task(docker.sys_docker_user_group, name="add-user") +services.add_collection(docker_collection) + +# Redis/Cache +cache = Collection("cache") +cache.add_task(redis.sys_redis_install, name="install") +cache.add_task(redis.sys_redis_config, name="configure") +cache.add_task(redis.sys_redis_configure_port, name="port") +cache.add_task(redis.sys_redis_configure_pass, name="password") +cache.add_task(redis.sys_redis_configure_memory, name="memory") +cache.add_task(redis.sys_redis_configure_interface, name="interface") +services.add_collection(cache) + +# Memcached +memcached_collection = Collection("memcached") +memcached_collection.add_task(memcached.sys_memcached_install, name="install") +memcached_collection.add_task(memcached.sys_memcached_config, name="configure") +memcached_collection.add_task(memcached.sys_memcached_configure_port, name="port") +memcached_collection.add_task(memcached.sys_memcached_configure_memory, name="memory") +memcached_collection.add_task(memcached.sys_memcached_configure_interface, name="interface") +services.add_collection(memcached_collection) + +# OpenVPN +vpn = Collection("vpn") +vpn.add_task(openvpn.sys_openvpn_docker_install, name="docker-install") +vpn.add_task(openvpn.sys_openvpn_docker_conf, name="docker-conf") +vpn.add_task(openvpn.sys_openvpn_docker_create_client, name="create-client") +vpn.add_task(openvpn.sys_openvpn_docker_revoke_client, name="revoke-client") +vpn.add_task(openvpn.sys_openvpn_docker_show_client_list, name="list-clients") +services.add_collection(vpn) + +ns.add_collection(services) + +# MOUNT/STORAGE COMMANDS +storage = Collection("storage") +storage.add_task(mount.sys_mount_device, name="mount-device") +storage.add_task(mount.sys_mount_fstab_add, name="add-to-fstab") +ns.add_collection(storage) + +# AWS/CLOUD COMMANDS - All EC2 functionality +aws = Collection("aws") +aws.add_task(ec2.aws_list_nodes, name="list-nodes") +aws.add_task(ec2.aws_get_node, name="get-node") +aws.add_task(ec2.aws_create_node, name="create-node") +aws.add_task(ec2.aws_destroy_node, name="destroy-node") +aws.add_task(ec2.aws_list_sizes, name="list-sizes") +aws.add_task(ec2.aws_get_size, name="get-size") +aws.add_task(ec2.aws_list_images, name="list-images") +aws.add_task(ec2.aws_get_image, name="get-image") +aws.add_task(ec2.aws_list_locations, name="list-locations") +aws.add_task(ec2.aws_get_location, name="get-location") +aws.add_task(ec2.aws_list_security_groups, name="list-security-groups") +aws.add_task(ec2.aws_security_group_found, name="find-security-group") +aws.add_task(ec2.aws_list_keypairs, name="list-keypairs") +aws.add_task(ec2.aws_keypair_found, name="find-keypair") +aws.add_task(ec2.aws_create_volume, name="create-volume") +aws.add_task(ec2.aws_list_volumes, name="list-volumes") +ns.add_collection(aws) + + +# Global exception handling for authentication issues +def handle_auth_exception(): + """Provide helpful guidance for SSH authentication failures.""" + print("\n❌ SSH Authentication Failed!") + print("\n🔑 To fix this, you need to set up SSH key authentication:") + print(" 1. Generate SSH key (if you don't have one):") + print(" ssh-keygen -t rsa -b 4096") + print("\n 2. Copy your SSH key to the server:") + print(" ssh-copy-id root@10.10.10.198") + print("\n 3. Test the connection:") + print(" ssh root@10.10.10.198") + print("\n 4. Then retry your Fabric command") + print("\n💡 Alternative: Use password auth (if enabled on server):") + print(" fab -H root@10.10.10.198 --prompt-for-login-password ") + system.exit(1) + + +# Monkey patch Fabric to catch authentication errors globally +original_open = None + + +def patched_connection_open(self): + """Wrapper for Connection.open() to catch auth errors.""" + try: + return original_open(self) + except AuthenticationException: + handle_auth_exception() + except SSHException as e: + if "Authentication failed" in str(e): + handle_auth_exception() + raise + + +# Apply the patch +try: + from fabric.connection import Connection + + if not hasattr(Connection, "_auth_patched"): + original_open = Connection.open + Connection.open = patched_connection_open + Connection._auth_patched = True +except ImportError: + pass diff --git a/lint.sh b/lint.sh new file mode 100755 index 0000000..d2c5c5c --- /dev/null +++ b/lint.sh @@ -0,0 +1,98 @@ +#!/usr/bin/env bash + +# Python Cloudy Linting Script +# Runs multiple linting tools to ensure code quality + +set -e + +echo "🔍 Running Python Cloudy linting checks..." +echo "==========================================" + +# Check if we're in a virtual environment +if [[ "$VIRTUAL_ENV" == "" ]]; then + echo "⚠️ Warning: Not in a virtual environment. Consider running:" + echo " source .venv/bin/activate" + echo "" +fi + +# Install linting tools if not present +echo "📦 Ensuring linting tools are installed..." +pip install -q -e ".[dev]" 2>/dev/null || { + echo "⚠️ Failed to install dev dependencies, trying requirements.txt fallback..." + pip install -q black flake8 isort mypy 2>/dev/null || true +} + +# Run Black formatter (100 character line length) +echo "" +echo "🖤 Running Black formatter..." +black --line-length 100 --check --diff cloudy/ || { + echo "❌ Black formatting issues found. Run 'black --line-length 100 cloudy/' to fix." + BLACK_FAILED=1 +} + +# Run isort import sorting +echo "" +echo "📚 Running isort import sorting..." +isort --profile black --line-length 100 --check-only --diff cloudy/ || { + echo "❌ Import sorting issues found. Run 'isort --profile black --line-length 100 cloudy/' to fix." + ISORT_FAILED=1 +} + +# Run flake8 linting +echo "" +echo "🐍 Running flake8 linting..." +flake8 --max-line-length=100 --extend-ignore=E203,W503 cloudy/ || { + echo "❌ Flake8 linting issues found." + FLAKE8_FAILED=1 +} + +# Run mypy type checking (optional, may have many issues initially) +echo "" +echo "🔧 Running mypy type checking..." +mypy cloudy/ --ignore-missing-imports --no-strict-optional 2>/dev/null || { + echo "⚠️ MyPy found type issues (this is expected initially)" + MYPY_FAILED=1 +} + +# Summary +echo "" +echo "📊 Linting Summary:" +echo "==================" + +if [[ "$BLACK_FAILED" == "1" ]]; then + echo "❌ Black: FAILED" +else + echo "✅ Black: PASSED" +fi + +if [[ "$ISORT_FAILED" == "1" ]]; then + echo "❌ isort: FAILED" +else + echo "✅ isort: PASSED" +fi + +if [[ "$FLAKE8_FAILED" == "1" ]]; then + echo "❌ flake8: FAILED" +else + echo "✅ flake8: PASSED" +fi + +if [[ "$MYPY_FAILED" == "1" ]]; then + echo "⚠️ mypy: ISSUES (non-blocking)" +else + echo "✅ mypy: PASSED" +fi + +# Exit with error if critical tools failed +if [[ "$BLACK_FAILED" == "1" || "$ISORT_FAILED" == "1" || "$FLAKE8_FAILED" == "1" ]]; then + echo "" + echo "❌ Linting failed! Please fix the issues above." + exit 1 +fi + +echo "" +echo "✅ All critical linting checks passed!" +echo "" +echo "💡 To auto-fix formatting issues, run:" +echo " black --line-length 100 cloudy/" +echo " isort --profile black --line-length 100 cloudy/" \ No newline at end of file diff --git a/pep8.sh b/pep8.sh deleted file mode 100755 index 58dc0e4..0000000 --- a/pep8.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/usr/bin/env bash - -echo -e "\nRunning: (pep8 --show-source --show-pep8 --select=errors --testsuite=.)\n\n" -pep8 --show-source --show-pep8 --select=errors --testsuite=./ -echo -e "\n\n" diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..1ab5f16 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,109 @@ +[build-system] +requires = ["setuptools>=61.0", "wheel"] +build-backend = "setuptools.build_meta" + +[project] +name = "python-cloudy" +version = "0.0.5" +authors = [ + {name = "Val Neekman", email = "info@neekware.com"}, +] +description = "A Python utility that simplifies cloud server configuration and automation" +readme = "README.md" +license = {text = "MIT"} +requires-python = ">=3.8" +classifiers = [ + "Development Status :: 3 - Alpha", + "Intended Audience :: Developers", + "License :: OSI Approved :: MIT License", + "Operating System :: POSIX", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Topic :: Software Development :: Libraries :: Python Modules", + "Topic :: System :: Systems Administration", +] +keywords = ["cloud", "server", "automation", "fabric", "deployment"] +dependencies = [ + "fabric>=3.2.2", + "colorama>=0.4.6", + "apache-libcloud>=3.8.0", + "s3cmd>=2.4.0", +] + +[project.optional-dependencies] +dev = [ + "black>=23.0.0", + "flake8>=6.0.0", + "isort>=5.12.0", + "mypy>=1.0.0", +] + +[project.urls] +"Homepage" = "https://github.com/un33k/python-cloudy" +"Bug Reports" = "https://github.com/un33k/python-cloudy/issues" +"Source" = "https://github.com/un33k/python-cloudy" + +[project.scripts] +cloudy = "cloudy.cli:main" + +[tool.setuptools.packages.find] +where = ["."] +include = ["cloudy*"] + +[tool.black] +line-length = 100 +target-version = ['py38', 'py39', 'py310', 'py311', 'py312'] +include = '\.pyi?$' +extend-exclude = ''' +/( + # directories + \.eggs + | \.git + | \.hg + | \.mypy_cache + | \.tox + | \.venv + | build + | dist +)/ +''' + +[tool.isort] +profile = "black" +line_length = 100 +multi_line_output = 3 +include_trailing_comma = true +force_grid_wrap = 0 +use_parentheses = true +ensure_newline_before_comments = true + +[tool.flake8] +max-line-length = 100 +extend-ignore = ["E203", "W503"] +exclude = [ + ".git", + "__pycache__", + "build", + "dist", + ".venv", + ".eggs", +] + +[tool.mypy] +python_version = "3.8" +warn_return_any = true +warn_unused_configs = true +disallow_untyped_defs = true +disallow_incomplete_defs = true +check_untyped_defs = true +disallow_untyped_decorators = true +no_implicit_optional = true +warn_redundant_casts = true +warn_unused_ignores = true +warn_no_return = true +warn_unreachable = true +strict_equality = true \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index 0adc4e6..f695970 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,14 @@ -fabric>=2.7 -apache-libcloud==3.6.1 -s3cmd==2.3.0 -ipython==8.7.0 \ No newline at end of file +# Production Dependencies +fabric>=3.2.2 +colorama>=0.4.6 +apache-libcloud>=3.8.0 +s3cmd>=2.4.0 + +# Development Dependencies +black>=23.0.0 +flake8>=6.0.0 +isort>=5.12.0 +mypy>=1.0.0 + +# Optional Development Tools +ipython>=8.7.0 \ No newline at end of file diff --git a/setup.py b/setup.py deleted file mode 100755 index 5f2f10b..0000000 --- a/setup.py +++ /dev/null @@ -1,93 +0,0 @@ -#!/usr/bin/env python3 - -from setuptools import setup -import re -import os -import sys - -name = 'python-cloudy' -package = 'cloudy' -description = 'A Python utility that simplifies cloud configuration' -url = 'https://github.com/un33k/python-cloudy' -author = 'Val Neekman' -author_email = 'info@neekware.com' -license = 'BSD' -install_requires = [ - 'fabric>=3.2.2', - 'colorama>=0.4.6', - 'apache-libcloud>=3.8.0', - 's3cmd>=2.4.0' -] -classifiers = [ - 'Development Status :: 3 - Alpha', - 'Intended Audience :: Developers', - 'License :: OSI Approved :: BSD License', - 'Operating System :: POSIX', - 'Programming Language :: Python', - 'Topic :: Software Development :: Libraries :: Python Modules', -] - -def read(fname): - with open(os.path.join(os.path.dirname(__file__), fname), encoding='utf-8') as f: - return f.read() - -def get_version(package): - """ - Return package version as listed in `__version__` in `__init__.py`. - """ - init_py = open(os.path.join(package, '__init__.py'), encoding='utf-8').read() - match = re.search(r"^__version__ = ['\"]([^'\"]+)['\"]", init_py, re.MULTILINE) - if match: - return match.group(1) - raise RuntimeError("Unable to find version string in {}.".format(os.path.join(package, '__init__.py'))) - -def get_packages(package): - """ - Return root package and all sub-packages. - """ - return [ - dirpath - for dirpath, dirnames, filenames in os.walk(package) - if os.path.exists(os.path.join(dirpath, '__init__.py')) - ] - -def get_package_data(package): - """ - Return all files under the root package, that are not in a package themselves. - """ - walk = [ - (dirpath.replace(package + os.sep, '', 1), filenames) - for dirpath, dirnames, filenames in os.walk(package) - if not os.path.exists(os.path.join(dirpath, '__init__.py')) - ] - filepaths = [] - for base, filenames in walk: - filepaths.extend([os.path.join(base, filename) for filename in filenames]) - return {package: filepaths} - -if sys.argv[-1] == 'publish': - os.system("python setup.py sdist upload") - args = {'version': get_version(package)} - print("You probably want to also tag the version now:") - print(" git tag -a %(version)s -m 'version %(version)s'" % args) - print(" git push --tags") - sys.exit() - -setup( - name=name, - version=get_version(package), - url=url, - license=license, - description=description, - long_description=read('README.md'), - long_description_content_type='text/markdown', - author=author, - author_email=author_email, - packages=get_packages(package), - package_data=get_package_data(package), - install_requires=install_requires, - classifiers=classifiers, - python_requires='>=3.8', -) - - diff --git a/test.py b/test.py deleted file mode 100644 index 6ed78ad..0000000 --- a/test.py +++ /dev/null @@ -1,18 +0,0 @@ -import unittest -import logging - -class TestCloudyFunctions(unittest.TestCase): - @classmethod - def setUpClass(cls): - logging.basicConfig(level=logging.DEBUG) - cls.log = logging.getLogger("TestCloudyFunctions") - - def test_manager(self): - # Placeholder for actual tests - self.log.debug("Running test_manager") - self.assertTrue(True) - -if __name__ == '__main__': - unittest.main() - - diff --git a/test.sh b/test.sh new file mode 100755 index 0000000..e385f06 --- /dev/null +++ b/test.sh @@ -0,0 +1,48 @@ +#!/usr/bin/env bash +# Python Cloudy Test Script +# +# This script runs the minimal test suite to ensure core functionality +# doesn't break during development. + +set -e # Exit on any error + +echo "🧪 Python Cloudy Test Script" +echo "=============================" + +# Check if virtual environment exists +if [ ! -d ".venv" ]; then + echo "❌ Virtual environment not found!" + echo "💡 Run './bootstrap.sh' to set up the environment first." + exit 1 +fi + +# Activate virtual environment +echo "🔧 Activating virtual environment..." +source .venv/bin/activate + +# Check if we're in the right directory (should have fabfile.py) +if [ ! -f "fabfile.py" ]; then + echo "❌ Error: fabfile.py not found!" + echo "💡 Make sure you're running this from the python-cloudy project root." + exit 1 +fi + +# Run the test suite +echo "🚀 Running test suite..." +echo "" + +python tests/test_runner.py + +# Get the exit code from the test runner +TEST_EXIT_CODE=$? + +echo "" +if [ $TEST_EXIT_CODE -eq 0 ]; then + echo "✅ All tests completed successfully!" +else + echo "❌ Tests failed with exit code: $TEST_EXIT_CODE" + exit $TEST_EXIT_CODE +fi + +echo "" +echo "🎯 Test script completed successfully!" \ No newline at end of file diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 0000000..5e5fa98 --- /dev/null +++ b/tests/__init__.py @@ -0,0 +1,5 @@ +""" +Python Cloudy Test Suite + +This package contains tests for the Python Cloudy infrastructure automation toolkit. +""" diff --git a/tests/test_minimal.py b/tests/test_minimal.py new file mode 100755 index 0000000..cfc2c07 --- /dev/null +++ b/tests/test_minimal.py @@ -0,0 +1,298 @@ +#!/usr/bin/env python +""" +Minimal test suite for Python Cloudy - ensures core functionality doesn't break during development. + +This test suite focuses on: +1. Import integrity - all modules can be imported +2. Task discovery - Fabric can find and load all tasks +3. Command structure - hierarchical namespaces work correctly +4. Configuration system - basic config loading works +""" + +import sys +import unittest +from unittest.mock import Mock, patch +import importlib + + +class TestImports(unittest.TestCase): + """Test that all core modules can be imported without errors.""" + + def test_sys_modules_import(self): + """Test that all sys modules can be imported.""" + sys_modules = [ + "cloudy.sys.core", + "cloudy.sys.user", + "cloudy.sys.ssh", + "cloudy.sys.firewall", + "cloudy.sys.python", + "cloudy.sys.security", + ] + + for module_name in sys_modules: + with self.subTest(module=module_name): + try: + importlib.import_module(module_name) + except ImportError as e: + self.fail(f"Failed to import {module_name}: {e}") + + def test_db_modules_import(self): + """Test that all database modules can be imported.""" + db_modules = [ + "cloudy.db.psql", + "cloudy.db.mysql", + "cloudy.db.pgbouncer", + "cloudy.db.pgpool", + "cloudy.db.pgis", + ] + + for module_name in db_modules: + with self.subTest(module=module_name): + try: + importlib.import_module(module_name) + except ImportError as e: + self.fail(f"Failed to import {module_name}: {e}") + + def test_web_modules_import(self): + """Test that all web modules can be imported.""" + web_modules = [ + "cloudy.web.apache", + "cloudy.web.nginx", + "cloudy.web.supervisor", + "cloudy.web.www", + "cloudy.web.geoip", + ] + + for module_name in web_modules: + with self.subTest(module=module_name): + try: + importlib.import_module(module_name) + except ImportError as e: + self.fail(f"Failed to import {module_name}: {e}") + + def test_aws_modules_import(self): + """Test that AWS modules can be imported.""" + try: + importlib.import_module("cloudy.aws.ec2") + except ImportError as e: + self.fail(f"Failed to import cloudy.aws.ec2: {e}") + + def test_recipe_modules_import(self): + """Test that recipe modules can be imported.""" + recipe_modules = [ + "cloudy.srv.recipe_generic_server", + "cloudy.srv.recipe_cache_redis", + "cloudy.srv.recipe_database_psql_gis", + "cloudy.srv.recipe_webserver_django", + "cloudy.srv.recipe_loadbalancer_nginx", + "cloudy.srv.recipe_vpn_server", + "cloudy.srv.recipe_standalone_server", + ] + + for module_name in recipe_modules: + with self.subTest(module=module_name): + try: + importlib.import_module(module_name) + except ImportError as e: + self.fail(f"Failed to import {module_name}: {e}") + + +class TestFabfileStructure(unittest.TestCase): + """Test that the fabfile.py can be loaded and has the expected structure.""" + + def test_fabfile_import(self): + """Test that fabfile.py can be imported.""" + try: + import fabfile + + self.assertTrue(hasattr(fabfile, "ns")) + except ImportError as e: + self.fail(f"Failed to import fabfile: {e}") + + def test_command_namespaces_exist(self): + """Test that expected command namespaces exist.""" + import fabfile + + expected_collections = [ + "recipe", + "sys", + "db", + "web", + "fw", + "security", + "services", + "storage", + "aws", + ] + + # Get all collection names from the namespace + collection_names = [] + for name, item in fabfile.ns.collections.items(): + collection_names.append(name) + + for expected in expected_collections: + with self.subTest(collection=expected): + self.assertIn( + expected, + collection_names, + f"Expected collection '{expected}' not found in fabfile", + ) + + def test_recipe_commands_exist(self): + """Test that recipe commands exist with expected names.""" + import fabfile + + recipe_collection = fabfile.ns.collections.get("recipe") + self.assertIsNotNone(recipe_collection, "Recipe collection not found") + + expected_recipes = [ + "gen-install", + "redis-install", + "psql-install", + "web-install", + "lb-install", + "vpn-install", + "sta-install", + ] + + recipe_tasks = list(recipe_collection.tasks.keys()) + + for expected in expected_recipes: + with self.subTest(recipe=expected): + self.assertIn(expected, recipe_tasks, f"Expected recipe '{expected}' not found") + + def test_db_commands_exist(self): + """Test that database command structure exists.""" + import fabfile + + db_collection = fabfile.ns.collections.get("db") + self.assertIsNotNone(db_collection, "DB collection not found") + + expected_subcollections = ["pg", "my", "pgb", "pgp", "gis"] + + for expected in expected_subcollections: + with self.subTest(subcollection=expected): + self.assertIn( + expected, + db_collection.collections, + f"Expected DB subcollection '{expected}' not found", + ) + + +class TestConfigurationSystem(unittest.TestCase): + """Test that the configuration system works correctly.""" + + def test_config_import(self): + """Test that configuration classes can be imported.""" + try: + from cloudy.util.conf import CloudyConfig + + self.assertTrue(callable(CloudyConfig)) + except ImportError as e: + self.fail(f"Failed to import CloudyConfig: {e}") + + @patch("cloudy.util.conf.os.path.exists") + def test_config_instantiation(self, mock_exists): + """Test that CloudyConfig can be instantiated.""" + mock_exists.return_value = False # Mock that config files don't exist + + try: + from cloudy.util.conf import CloudyConfig + + config = CloudyConfig([]) # Empty config list + self.assertIsNotNone(config) + except Exception as e: + self.fail(f"Failed to instantiate CloudyConfig: {e}") + + +class TestTaskDiscovery(unittest.TestCase): + """Test that Fabric can discover tasks correctly.""" + + def test_fabric_task_discovery(self): + """Test that Fabric can discover all tasks without errors.""" + import subprocess + import os + + # Change to project directory + project_dir = os.path.dirname(os.path.abspath(__file__)) + + try: + # Run fab -l to test task discovery + result = subprocess.run( + ["fab", "-l"], cwd=project_dir, capture_output=True, text=True, timeout=30 + ) + + # Check that fab -l completed successfully + self.assertEqual(result.returncode, 0, f"fab -l failed with error: {result.stderr}") + + # Check that we have a reasonable number of commands + lines = result.stdout.split("\n") + # Count lines that contain task names (have two spaces at start for task listing) + task_lines = [ + line + for line in lines + if line.strip() and (line.startswith(" ") and not line.startswith(" ")) + ] + + # We should have at least 50 commands (we know we have ~127) + self.assertGreater( + len(task_lines), + 50, + f"Too few commands discovered by Fabric. Found {len(task_lines)} tasks", + ) + + # Check for key command patterns + output = result.stdout + self.assertIn("recipe.", output, "Recipe commands not found") + self.assertIn("sys.", output, "System commands not found") + self.assertIn("db.", output, "Database commands not found") + self.assertIn("web.", output, "Web commands not found") + + except subprocess.TimeoutExpired: + self.fail("fab -l command timed out") + except Exception as e: + self.fail(f"Error running fab -l: {e}") + + +def run_minimal_tests(): + """Run the minimal test suite and return results.""" + print("🧪 Running Python Cloudy minimal test suite...") + print("=" * 50) + + # Create test suite + loader = unittest.TestLoader() + suite = unittest.TestSuite() + + # Add test classes + test_classes = [ + TestImports, + TestFabfileStructure, + TestConfigurationSystem, + TestTaskDiscovery, + ] + + for test_class in test_classes: + tests = loader.loadTestsFromTestCase(test_class) + suite.addTests(tests) + + # Run tests + runner = unittest.TextTestRunner(verbosity=2) + result = runner.run(suite) + + # Print summary + print("\n" + "=" * 50) + if result.wasSuccessful(): + print("✅ All minimal tests passed!") + print(f" Ran {result.testsRun} tests successfully") + else: + print("❌ Some tests failed!") + print(f" Ran {result.testsRun} tests") + print(f" Failures: {len(result.failures)}") + print(f" Errors: {len(result.errors)}") + + return result.wasSuccessful() + + +if __name__ == "__main__": + success = run_minimal_tests() + sys.exit(0 if success else 1) diff --git a/tests/test_runner.py b/tests/test_runner.py new file mode 100755 index 0000000..3cbcb6e --- /dev/null +++ b/tests/test_runner.py @@ -0,0 +1,27 @@ +#!/usr/bin/env python +""" +Python Cloudy Test Runner + +This runs the minimal test suite to ensure core functionality works during development. +""" + +import sys +import os + +# Add parent directory to path so we can import cloudy modules +sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + +if __name__ == "__main__": + from test_minimal import run_minimal_tests + + print("🚀 Python Cloudy Development Test Suite") + print("=" * 50) + + success = run_minimal_tests() + + if success: + print("\n🎉 All tests passed! The core functionality is working correctly.") + else: + print("\n💥 Some tests failed! Please check the output above.") + + sys.exit(0 if success else 1)