- Use DNS domain for lighthouse discovery (works with HAProxy/Keepalived)
- All nodes are lighthouses by default for full redundancy
- Remove static_host_map complexity - DNS handles everything
- Ask for lighthouse domain during setup
- Allow disabling lighthouse mode for remote/edge nodes
- Simplified cluster secret: domain:port:ca_cert

This allows using existing HA infrastructure (DNS pointing to live nodes) instead of complex IP tracking and manual updates.
466 lines
13 KiB
Bash
Executable File
466 lines
13 KiB
Bash
Executable File
#!/bin/bash
#
# Nullpoint cluster setup: Nebula overlay network + GlusterFS replicated
# storage. Lighthouse discovery uses a DNS domain (so existing HAProxy /
# Keepalived HA infrastructure can point it at live nodes) and the cluster
# secret is "domain:port:ca_cert_base64". Must be run as root on a
# dnf-based distribution.

set -euo pipefail

# Colors for output (readonly: these are constants and are never reassigned)
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly NC='\033[0m' # No Color

# Configuration constants
readonly NEBULA_NETWORK="192.168.100.0/24"     # overlay subnet
readonly NEBULA_PORT=4242                      # UDP port Nebula listens on
readonly NEBULA_CONFIG="/etc/nebula"           # certs + config.yaml live here
readonly GLUSTER_BRICK_PATH="/gluster/cluster" # local brick storage
readonly GLUSTER_MOUNT_PATH="/data/storage"    # client mount point
readonly GLUSTER_VOLUME="cluster-volume"
readonly NEBULA_VERSION="v1.8.2"
# Banner
banner="================================"
printf '%b\n' \
    "${GREEN}${banner}${NC}" \
    "${GREEN} Nullpoint Cluster Setup${NC}" \
    "${GREEN}${banner}${NC}" \
    ""

# Refuse to run without root privileges: every later step (dnf, systemctl,
# firewall-cmd, writing under /etc) requires them.
if (( EUID != 0 )); then
    printf '%b\n' "${RED}Please run as root${NC}"
    exit 1
fi
# Install required packages: curl/tar for the Nebula download, GlusterFS
# server + client for replicated storage. Assumes a dnf-based distro.
echo -e "${YELLOW}[+] Installing required packages...${NC}"
dnf install -y curl tar glusterfs-server glusterfs-client || exit 1

# Download and install Nebula from the official GitHub release.
# NOTE(review): the tarball is installed without checksum/signature
# verification - consider validating against the published checksums.
echo -e "${YELLOW}[+] Downloading Nebula ${NEBULA_VERSION}...${NC}"
cd /tmp
curl -LO "https://github.com/slackhq/nebula/releases/download/${NEBULA_VERSION}/nebula-linux-amd64.tar.gz"
tar -zxf nebula-linux-amd64.tar.gz
# The tarball contains the 'nebula' daemon and the 'nebula-cert' CA tool.
mv nebula nebula-cert /usr/local/bin/
chmod +x /usr/local/bin/nebula /usr/local/bin/nebula-cert

# Enable and start the GlusterFS management daemon so volumes can be
# created or joined later in this script.
systemctl enable glusterd
systemctl start glusterd
# Create directories
echo -e "${YELLOW}[+] Creating directories...${NC}"
# One mkdir call covers the brick path, the mount point, its /data parent,
# and the Nebula config directory; -p makes this idempotent.
mkdir -p -- "$GLUSTER_BRICK_PATH" "$GLUSTER_MOUNT_PATH" /data "$NEBULA_CONFIG"
# Function to generate Nebula CA and certificates.
# Creates the cluster certificate authority (ca.crt / ca.key) inside
# $NEBULA_CONFIG. Side effect: leaves the caller's working directory
# changed to $NEBULA_CONFIG.
generate_nebula_ca() {
    echo -e "${YELLOW}[+] Generating Nebula CA...${NC}"
    cd "$NEBULA_CONFIG"
    /usr/local/bin/nebula-cert ca -name "Nullpoint Cluster CA"
    # The CA private key must never be readable by other users.
    chmod 600 ca.key
}
# Sign a host certificate with the cluster CA.
# Arguments: $1 - host/cert name, $2 - Nebula IP in CIDR form,
#            $3 - comma-separated group list.
# Side effect: leaves the caller's working directory changed to
# $NEBULA_CONFIG, and tightens permissions on the new private key.
create_host_cert() {
    local name="$1" addr="$2" cert_groups="$3"

    cd "$NEBULA_CONFIG"
    /usr/local/bin/nebula-cert sign -name "$name" -ip "$addr" -groups "$cert_groups"
    chmod 600 "${name}.key"
}
# Function to get the next available overlay IP (192.168.100.<n>).
# Scans the local registry for the highest allocated final octet and
# returns highest+1; starts at .10 when no registry (or no IPs) exists.
# Outputs: the chosen IP on stdout.
get_next_ip() {
    local base_ip="192.168.100"
    local next_num=10
    local highest=""

    if [[ -f "$NEBULA_CONFIG/cluster-registry.txt" ]]; then
        # Find the highest IP in use. The '|| true' is essential: with
        # 'set -eo pipefail' a registry that contains no matching IPs makes
        # grep exit 1, which would otherwise abort the whole script (or
        # silently truncate this function's output in a substitution).
        highest=$(grep -oE "192\.168\.100\.[0-9]+" "$NEBULA_CONFIG/cluster-registry.txt" \
            | cut -d. -f4 | sort -n | tail -1) || true
        if [[ -n "$highest" ]]; then
            next_num=$((highest + 1))
        fi
    fi

    echo "${base_ip}.${next_num}"
}
# Function to setup firewall rules.
# Opens the ports Nebula and GlusterFS need via firewalld and trusts all
# traffic originating from the overlay network.
setup_firewall() {
    echo -e "${YELLOW}[+] Configuring firewall...${NC}"

    # Nebula overlay traffic (UDP)
    firewall-cmd --permanent --add-port=${NEBULA_PORT}/udp

    # GlusterFS ports
    firewall-cmd --permanent --add-service=glusterfs
    firewall-cmd --permanent --add-port=24007-24008/tcp # GlusterFS Daemon
    firewall-cmd --permanent --add-port=49152-49200/tcp # Brick ports
    # NOTE(review): the 49152-49200 range caps the number of bricks this
    # node can serve; confirm it matches the expected cluster size.

    # Allow all traffic arriving from the Nebula overlay network
    firewall-cmd --permanent --zone=trusted --add-source=${NEBULA_NETWORK}

    # Apply the permanent rules now
    firewall-cmd --reload
}
# Create Nebula systemd service.
# Writes /etc/systemd/system/nebula.service and reloads systemd. The unit
# auto-restarts the daemon and supports config reload via SIGHUP
# (the \$MAINPID escape keeps the variable literal in the unit file).
create_nebula_service() {
    cat > /etc/systemd/system/nebula.service <<EOF
[Unit]
Description=Nebula overlay networking tool
After=network.target

[Service]
Type=simple
User=root
Group=root
ExecStart=/usr/local/bin/nebula -config ${NEBULA_CONFIG}/config.yaml
ExecReload=/bin/kill -HUP \$MAINPID
Restart=always
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF

    systemctl daemon-reload
}
# Create new cluster.
# Bootstraps the founding node: asks for the lighthouse DNS domain and the
# lighthouse role, generates the Nebula CA and this node's certificate,
# writes config.yaml, starts Nebula, opens the firewall, creates + mounts
# the GlusterFS volume, and prints the shareable cluster secret
# (domain:port:ca_cert_base64).
create_cluster() {
    echo -e "${GREEN}[*] Creating new cluster...${NC}\n"

    local hostname
    hostname=$(hostname)
    # The founding node always takes the first overlay address.
    local node_ip="192.168.100.1"

    # Ask for lighthouse domain
    read -p "Enter lighthouse domain (e.g., cluster.example.com): " lighthouse_domain
    if [ -z "$lighthouse_domain" ]; then
        echo -e "${RED}Lighthouse domain required!${NC}"
        exit 1
    fi

    # Ask if this node should be a lighthouse (default: yes)
    read -p "Should this node be a lighthouse? [Y/n]: " is_lighthouse
    is_lighthouse=${is_lighthouse:-Y}
    if [[ "$is_lighthouse" =~ ^[Yy] ]]; then
        am_lighthouse="true"
    else
        am_lighthouse="false"
    fi

    # Fix: a lighthouse must not list upstream lighthouses - Nebula refuses
    # to start when am_lighthouse is true and lighthouse.hosts is non-empty.
    local lighthouse_hosts
    if [[ "$am_lighthouse" == "true" ]]; then
        lighthouse_hosts="  hosts: []"
    else
        lighthouse_hosts=$(printf '  hosts:\n    - "%s:%s"' "$lighthouse_domain" "$NEBULA_PORT")
    fi

    # Generate Nebula CA
    generate_nebula_ca

    # Create certificate for this node
    create_host_cert "$hostname" "${node_ip}/24" "cluster"

    # Create Nebula config
    cat > "${NEBULA_CONFIG}/config.yaml" <<EOF
pki:
  ca: ${NEBULA_CONFIG}/ca.crt
  cert: ${NEBULA_CONFIG}/${hostname}.crt
  key: ${NEBULA_CONFIG}/${hostname}.key

lighthouse:
  am_lighthouse: ${am_lighthouse}
  serve_dns: false
  interval: 60
${lighthouse_hosts}

listen:
  host: 0.0.0.0
  port: ${NEBULA_PORT}

punchy:
  punch: true
  respond: true

tun:
  disabled: false
  dev: nebula1
  drop_local_broadcast: false
  drop_multicast: false
  tx_queue: 500
  mtu: 1300

logging:
  level: info
  format: text

firewall:
  conntrack:
    tcp_timeout: 12m
    udp_timeout: 3m
    default_timeout: 10m
    max_connections: 100000

  outbound:
    - port: any
      proto: any
      host: any

  inbound:
    - port: any
      proto: icmp
      host: any
    - port: any
      proto: any
      host: any
EOF

    # Start Nebula as a systemd service
    create_nebula_service
    systemctl enable nebula
    systemctl start nebula

    # Setup firewall
    setup_firewall

    # Create cluster registry.
    # Bug fix: the original wrote the never-assigned ${lighthouse_ip}, which
    # aborts the script here under 'set -u'; the founding node's own overlay
    # IP is what belongs in the registry.
    echo "${node_ip} lighthouse ${hostname}" > "${NEBULA_CONFIG}/cluster-registry.txt"

    # Create GlusterFS volume (single brick; replicas are added as nodes join)
    echo -e "${YELLOW}[+] Creating GlusterFS volume...${NC}"
    mkdir -p "${GLUSTER_BRICK_PATH}/brick1"
    gluster volume create ${GLUSTER_VOLUME} $(hostname):${GLUSTER_BRICK_PATH}/brick1 force 2>/dev/null || true
    gluster volume start ${GLUSTER_VOLUME} 2>/dev/null || true

    # Mount volume now and persist it across reboots
    mount -t glusterfs localhost:/${GLUSTER_VOLUME} ${GLUSTER_MOUNT_PATH}
    grep -q "${GLUSTER_VOLUME}" /etc/fstab || echo "localhost:/${GLUSTER_VOLUME} ${GLUSTER_MOUNT_PATH} glusterfs defaults,_netdev 0 0" >> /etc/fstab

    # Package CA certificate for sharing (-w0: single line, no wrapping)
    ca_cert_b64=$(base64 -w0 "${NEBULA_CONFIG}/ca.crt")

    echo -e "\n${GREEN}════════════════════════════════════════${NC}"
    echo -e "${GREEN}Cluster created successfully!${NC}"
    echo -e "${GREEN}════════════════════════════════════════${NC}\n"
    echo -e "Share this cluster secret with joining nodes:\n"
    echo -e "${GREEN}${lighthouse_domain}:${NEBULA_PORT}:${ca_cert_b64}${NC}\n"
    echo -e "${YELLOW}Status:${NC}"
    echo " - Nebula IP: ${node_ip}"
    echo " - Lighthouse domain: ${lighthouse_domain}:${NEBULA_PORT}"
    echo " - This node is lighthouse: ${am_lighthouse}"
    echo " - GlusterFS volume: ${GLUSTER_VOLUME}"
    echo " - Mount point: ${GLUSTER_MOUNT_PATH}"
}
# Join existing cluster.
# Configures this node from a cluster secret (domain:port:ca_cert_base64):
# installs the CA, walks the operator through manual certificate signing on
# an existing node, writes config.yaml, starts Nebula, and joins the
# GlusterFS volume as an additional replica.
join_cluster() {
    echo -e "${GREEN}[*] Joining existing cluster...${NC}\n"

    local hostname
    hostname=$(hostname)
    local my_ip
    my_ip=$(get_next_ip)

    # Get cluster details
    read -p "Enter cluster secret (lighthouse_domain:port:ca_cert_base64): " cluster_secret

    if [ -z "$cluster_secret" ]; then
        echo -e "${RED}Cluster secret required!${NC}"
        exit 1
    fi

    # Parse secret ('-f3-' keeps the whole base64 blob even if it contains ':')
    lighthouse_domain=$(echo "$cluster_secret" | cut -d: -f1)
    nebula_port=$(echo "$cluster_secret" | cut -d: -f2)
    ca_cert_b64=$(echo "$cluster_secret" | cut -d: -f3-)

    # Ask if this node should be a lighthouse (default: yes)
    read -p "Should this node be a lighthouse? [Y/n]: " is_lighthouse
    is_lighthouse=${is_lighthouse:-Y}
    if [[ "$is_lighthouse" =~ ^[Yy] ]]; then
        am_lighthouse="true"
    else
        am_lighthouse="false"
    fi

    # Fix: a lighthouse must not list upstream lighthouses - Nebula refuses
    # to start when am_lighthouse is true and lighthouse.hosts is non-empty.
    local lighthouse_hosts
    if [[ "$am_lighthouse" == "true" ]]; then
        lighthouse_hosts="  hosts: []"
    else
        lighthouse_hosts=$(printf '  hosts:\n    - "%s:%s"' "$lighthouse_domain" "$nebula_port")
    fi

    echo -e "${YELLOW}[+] Configuring Nebula (IP: ${my_ip})...${NC}"

    # Decode and save CA certificate
    echo "$ca_cert_b64" | base64 -d > "${NEBULA_CONFIG}/ca.crt"

    echo -e "${RED}WARNING: Certificate signing not implemented in this simplified version.${NC}"
    echo -e "${YELLOW}On the lighthouse node, run this command to create a certificate for this node:${NC}"
    echo -e "${GREEN}cd ${NEBULA_CONFIG} && /usr/local/bin/nebula-cert sign -name \"${hostname}\" -ip \"${my_ip}/24\" -groups \"cluster\"${NC}"
    echo -e "${YELLOW}Then copy ${hostname}.crt and ${hostname}.key to ${NEBULA_CONFIG}/ on this node.${NC}"

    read -p "Press enter once you've created and copied the certificate files..."

    # Create Nebula config ('serve_dns: false' added for consistency with
    # the config written by create_cluster; false is also the default)
    cat > "${NEBULA_CONFIG}/config.yaml" <<EOF
pki:
  ca: ${NEBULA_CONFIG}/ca.crt
  cert: ${NEBULA_CONFIG}/${hostname}.crt
  key: ${NEBULA_CONFIG}/${hostname}.key

lighthouse:
  am_lighthouse: ${am_lighthouse}
  serve_dns: false
  interval: 60
${lighthouse_hosts}

listen:
  host: 0.0.0.0
  port: ${NEBULA_PORT}

punchy:
  punch: true
  respond: true

tun:
  disabled: false
  dev: nebula1
  drop_local_broadcast: false
  drop_multicast: false
  tx_queue: 500
  mtu: 1300

logging:
  level: info
  format: text

firewall:
  conntrack:
    tcp_timeout: 12m
    udp_timeout: 3m
    default_timeout: 10m
    max_connections: 100000

  outbound:
    - port: any
      proto: any
      host: any

  inbound:
    - port: any
      proto: icmp
      host: any
    - port: any
      proto: any
      host: any
EOF

    # Start Nebula
    create_nebula_service
    systemctl enable nebula
    systemctl start nebula

    # Setup firewall
    setup_firewall

    # Wait for Nebula connection
    echo -e "${YELLOW}[+] Waiting for Nebula connection...${NC}"
    sleep 5

    # Test connection - try pinging the first node
    echo -e "${YELLOW}[+] Testing Nebula connection...${NC}"
    if ping -c 1 -W 3 192.168.100.1 > /dev/null 2>&1; then
        echo -e "${GREEN}[✓] Connected to node at 192.168.100.1${NC}"
    else
        echo -e "${YELLOW}[!] Could not reach 192.168.100.1 - this may be normal if it's the first node${NC}"
    fi

    # Register with cluster (local registry only)
    echo "${my_ip} ${hostname} $(date)" >> "${NEBULA_CONFIG}/cluster-registry.txt"

    # Join GlusterFS cluster
    echo -e "${YELLOW}[+] Joining GlusterFS cluster...${NC}"

    # Try to probe existing nodes
    echo -e "${YELLOW}[+] Looking for existing GlusterFS peers...${NC}"
    gluster peer probe 192.168.100.1 2>/dev/null || echo "Could not reach 192.168.100.1"

    # Wait for peer to be connected
    sleep 3

    # Create brick directory
    mkdir -p "${GLUSTER_BRICK_PATH}/brick1"

    # Add brick to existing volume and increase replica count
    echo -e "${YELLOW}[+] Adding brick to GlusterFS volume...${NC}"

    # Get current brick count.
    # Bug fixes vs. the original:
    #  - '|| true': if the volume is unreachable the pipeline exits non-zero
    #    and 'set -eo pipefail' would silently abort the whole script here.
    #  - 'tail -1' instead of 'head -1': for a replicated volume the line
    #    reads "Number of Bricks: 1 x N = N"; the FIRST number is the
    #    distribute count (always 1 here), the LAST is the total brick
    #    count we actually need to grow.
    replica_count=$(gluster volume info ${GLUSTER_VOLUME} 2>/dev/null | grep "Number of Bricks" | grep -oE "[0-9]+" | tail -1) || true
    replica_count=${replica_count:-0}
    new_replica_count=$((replica_count + 1))

    # Add brick with increased replica count
    gluster volume add-brick ${GLUSTER_VOLUME} replica ${new_replica_count} $(hostname):${GLUSTER_BRICK_PATH}/brick1 force

    # Mount the volume
    mount -t glusterfs 192.168.100.1:/${GLUSTER_VOLUME} ${GLUSTER_MOUNT_PATH}

    # Add to fstab (idempotent)
    grep -q "${GLUSTER_VOLUME}" /etc/fstab || echo "192.168.100.1:/${GLUSTER_VOLUME} ${GLUSTER_MOUNT_PATH} glusterfs defaults,_netdev 0 0" >> /etc/fstab

    echo -e "\n${GREEN}════════════════════════════════════════${NC}"
    echo -e "${GREEN}Successfully joined cluster!${NC}"
    echo -e "${GREEN}════════════════════════════════════════${NC}\n"
    echo -e "${YELLOW}Node details:${NC}"
    echo " - Nebula IP: ${my_ip}"
    echo " - Hostname: ${hostname}"
    echo " - GlusterFS mounted at: ${GLUSTER_MOUNT_PATH}"
}
# Show cluster status: Nebula service/interface state, the local node
# registry, GlusterFS peer/volume state, and the storage mount.
show_status() {
    echo -e "\n${YELLOW}=== Cluster Status ===${NC}\n"

    if [[ -f "${NEBULA_CONFIG}/config.yaml" ]]; then
        echo -e "${GREEN}Nebula Status:${NC}"
        if systemctl is-active nebula; then
            echo "Service: Active"
        else
            echo "Service: Inactive"
        fi

        if ip addr show nebula1 >/dev/null 2>&1; then
            local iface_addr
            iface_addr=$(ip addr show nebula1 | grep 'inet ' | awk '{print $2}')
            echo "Interface: nebula1 ${iface_addr}"
        else
            echo "Interface: Not found"
        fi
        echo ""

        if [[ -f "${NEBULA_CONFIG}/cluster-registry.txt" ]]; then
            echo -e "${GREEN}Cluster Nodes:${NC}"
            cat "${NEBULA_CONFIG}/cluster-registry.txt"
            echo ""
        fi
    else
        echo -e "${RED}Nebula not configured${NC}\n"
    fi

    echo -e "${GREEN}GlusterFS Status:${NC}"
    gluster peer status 2>/dev/null || echo "Not connected to cluster"
    echo ""
    gluster volume status "${GLUSTER_VOLUME}" 2>/dev/null || echo "Volume ${GLUSTER_VOLUME} not found"
    echo ""

    echo -e "${GREEN}Mounted at:${NC} ${GLUSTER_MOUNT_PATH}"
    df -h "${GLUSTER_MOUNT_PATH}" 2>/dev/null || echo "Not mounted"
}
# Main menu: dispatch to the requested operation.
printf '%s\n' \
    "What would you like to do?" \
    " 1) Create new cluster" \
    " 2) Join existing cluster" \
    " 3) Show cluster status" \
    " 4) Exit" \
    ""
read -p "Enter choice [1-4]: " choice

case "$choice" in
    1) create_cluster ;;
    2) join_cluster ;;
    3) show_status ;;
    4) echo "Exiting..."; exit 0 ;;
    *) echo -e "${RED}Invalid choice${NC}"; exit 1 ;;
esac