#!/bin/bash
#
# nullpoint cluster bootstrap
#
# Builds a private Nebula overlay network (192.168.100.0/24) and a GlusterFS
# replicated storage cluster on top of it. Interactive: create a new cluster,
# join an existing one (full or remote/client-only node), or show status.
#
# Must run as root on a dnf-based distro (Fedora/RHEL/Rocky) with firewalld.
#
# NOTE(review): this file arrived with its line structure mangled and several
# here-document bodies destroyed (the systemd unit, the Nebula config.yaml
# template, and the first half of create_cluster were lost). Those sections
# are reconstructed from context below and are marked NOTE(review) — confirm
# them against the original before relying on this script.

set -euo pipefail

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

# Configuration
readonly NEBULA_NETWORK="192.168.100.0/24"
readonly NEBULA_PORT=4242
readonly NEBULA_CONFIG="/etc/nebula"
readonly GLUSTER_BRICK_PATH="/gluster/cluster"
readonly GLUSTER_MOUNT_PATH="/data/storage"
readonly GLUSTER_VOLUME="cluster-volume"
readonly NEBULA_VERSION="v1.9.6"

# Decorative banner.
# NOTE(review): the ASCII-art column alignment was lost when the file's
# whitespace was mangled; glyph runs are preserved in order, one per line.
# Restore the original layout by hand if desired.
CLUSTER_LOGO=$(cat << "EOF"
==
.@@@@@
=@@@@@%
+@@@@@      :@@@@@
*@@@@:    -*#+:
*@@@-  %@@@@ .%@@@@@@@@@@@@*
@@@@@  %@@@@ *@@@@@@@@@@@@@@@@
+@@@@* #@@@@. %@@@@@@@. @@@@@@
-@@@@%:@@@@@@: *@@@@@@.
#@@@@@@@@@: .@@@@@@@@%=.#@@@@@@@@@@@@@#.
*@@@@@@@@@@@@@@@@@@@@@@@@@@+
=@@@@@@@@@@@@@@%=#@@@@@@@@%
:@@@@@@@@@+ -@@@@@@+
*@@@@@-%@@@@: .@@@@@#
=@@@@@@@ .@@@@# #@@@@+
%@@@@@@@@@@@@@@@@. @@@@@
@@@@@ #@@@@@@@@@@@@@= @@@@@
=@@@% :#@@@@- .@@@@#
#@@@@- #@@@@%
+@@@@@* @@@@@-
.#%.
[nullpoint cluster]
EOF
)

echo -e "${GREEN}${CLUSTER_LOGO}${NC}\n"

# Check if running as root
if [ "$EUID" -ne 0 ]; then
  echo -e "${RED}Please run as root${NC}"
  exit 1
fi

# Install base packages
echo -e "${YELLOW}[+] Installing base packages...${NC}"
dnf install -y curl tar || exit 1

# Download and install Nebula
echo -e "${YELLOW}[+] Downloading Nebula ${NEBULA_VERSION}...${NC}"
cd /tmp || exit 1
curl -LO "https://github.com/slackhq/nebula/releases/download/${NEBULA_VERSION}/nebula-linux-amd64.tar.gz"
tar -zxf nebula-linux-amd64.tar.gz
mv nebula nebula-cert /usr/local/bin/
chmod +x /usr/local/bin/nebula /usr/local/bin/nebula-cert

# Create directories
echo -e "${YELLOW}[+] Creating directories...${NC}"
mkdir -p "$GLUSTER_MOUNT_PATH"
mkdir -p /data
mkdir -p "$NEBULA_CONFIG"

#######################################
# Generate the cluster certificate authority in $NEBULA_CONFIG.
# Produces ca.crt / ca.key; the key is locked down to root.
#######################################
generate_nebula_ca() {
  echo -e "${YELLOW}[+] Generating Nebula CA...${NC}"
  cd "$NEBULA_CONFIG" || exit 1
  /usr/local/bin/nebula-cert ca -name "Nullpoint Cluster CA"
  chmod 600 ca.key
}

#######################################
# Sign a host certificate against the local CA.
# Arguments:
#   $1 - hostname (certificate name, also the output file basename)
#   $2 - Nebula IP in CIDR form, e.g. 192.168.100.1/24
#   $3 - comma-separated Nebula groups
#######################################
create_host_cert() {
  local hostname="$1"
  local ip="$2"
  local groups="$3"
  cd "$NEBULA_CONFIG" || exit 1
  /usr/local/bin/nebula-cert sign -name "$hostname" -ip "$ip" -groups "$groups"
  chmod 600 "${hostname}.key"
}

#######################################
# Print the next free 192.168.100.x address.
# Scans the local registry for the highest address in use and returns
# highest+1; defaults to .10 when no registry exists yet.
#######################################
get_next_ip() {
  local base_ip="192.168.100"
  local next_num=10
  if [ -f "$NEBULA_CONFIG/cluster-registry.txt" ]; then
    # Find highest IP in use
    local existing_ips
    existing_ips=$(grep -oE "192\.168\.100\.[0-9]+" "$NEBULA_CONFIG/cluster-registry.txt" | cut -d. -f4 | sort -n | tail -1)
    if [ -n "$existing_ips" ]; then
      next_num=$((existing_ips + 1))
    fi
  fi
  echo "${base_ip}.${next_num}"
}

#######################################
# Open firewalld for Nebula (UDP) and GlusterFS, and fully trust traffic
# arriving from the Nebula overlay network.
#######################################
setup_firewall() {
  echo -e "${YELLOW}[+] Configuring firewall...${NC}"
  # Nebula
  firewall-cmd --permanent --add-port="${NEBULA_PORT}/udp"
  # GlusterFS ports
  firewall-cmd --permanent --add-service=glusterfs
  firewall-cmd --permanent --add-port=24007-24008/tcp  # GlusterFS Daemon
  firewall-cmd --permanent --add-port=49152-49200/tcp  # Brick ports
  # Allow traffic from Nebula network
  firewall-cmd --permanent --zone=trusted --add-source="${NEBULA_NETWORK}"
  firewall-cmd --reload
}

#######################################
# Install a systemd unit for the Nebula daemon.
# NOTE(review): the original here-document body was lost in transit; this
# unit is a standard Nebula service definition reconstructed from context.
#######################################
create_nebula_service() {
  cat > /etc/systemd/system/nebula.service <<EOF
[Unit]
Description=Nebula overlay network daemon
After=network-online.target
Wants=network-online.target

[Service]
ExecStart=/usr/local/bin/nebula -config ${NEBULA_CONFIG}/config.yaml
Restart=always
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF
  systemctl daemon-reload
}

#######################################
# Write ${NEBULA_CONFIG}/config.yaml for this node.
# Arguments:
#   $1 - hostname (selects the cert/key file pair)
#   $2 - "true" if this node is a lighthouse, else "false"
#   $3 - public endpoint (IP or DNS) of the primary lighthouse
#   $4 - lighthouse UDP port
# NOTE(review): the original config here-document was lost in transit; this
# template is reconstructed from the Nebula example configuration and the
# surviving variable references (interface name nebula1, lighthouse at .1).
#######################################
write_nebula_config() {
  local hostname="$1"
  local am_lighthouse="$2"
  local endpoints="$3"
  local port="$4"

  # Lighthouses must not list themselves; other nodes point at .1.
  local lh_section
  if [ "$am_lighthouse" = "true" ]; then
    lh_section=$'lighthouse:\n  am_lighthouse: true\n  interval: 60'
  else
    lh_section=$'lighthouse:\n  am_lighthouse: false\n  interval: 60\n  hosts:\n    - "192.168.100.1"'
  fi

  cat > "${NEBULA_CONFIG}/config.yaml" <<EOF
pki:
  ca: ${NEBULA_CONFIG}/ca.crt
  cert: ${NEBULA_CONFIG}/${hostname}.crt
  key: ${NEBULA_CONFIG}/${hostname}.key

static_host_map:
  "192.168.100.1": ["${endpoints}:${port}"]

${lh_section}

listen:
  host: 0.0.0.0
  port: ${NEBULA_PORT}

punchy:
  punch: true

tun:
  dev: nebula1

firewall:
  outbound:
    - port: any
      proto: any
      host: any
  inbound:
    - port: any
      proto: any
      host: any
EOF
  chmod 600 "${NEBULA_CONFIG}/config.yaml"
}

#######################################
# Bootstrap a brand-new cluster on this machine: first node, lighthouse,
# GlusterFS server, Nebula IP 192.168.100.1. Prints the shareable cluster
# secret (endpoint:port:base64-CA) on success.
# NOTE(review): the first half of this function was lost in transit and is
# reconstructed from the variables consumed by its surviving second half
# (node_ip, lighthouse_endpoints, am_lighthouse) — verify against original.
#######################################
create_cluster() {
  echo -e "${GREEN}[*] Creating new cluster...${NC}\n"
  local hostname
  hostname=$(hostname)
  local node_ip="192.168.100.1"  # first node always takes .1
  am_lighthouse="true"

  read -r -p "Enter this machine's public IP or DNS name (reachable by joining nodes): " lighthouse_endpoints
  if [ -z "$lighthouse_endpoints" ]; then
    echo -e "${RED}A public endpoint is required so joining nodes can reach the lighthouse!${NC}"
    exit 1
  fi

  # Install GlusterFS server packages
  echo -e "${YELLOW}[+] Installing GlusterFS server packages...${NC}"
  dnf install -y glusterfs-server || exit 1
  systemctl enable glusterd
  systemctl start glusterd

  # Nebula PKI, config and service
  generate_nebula_ca
  create_host_cert "$hostname" "${node_ip}/24" "cluster"
  write_nebula_config "$hostname" "$am_lighthouse" "$lighthouse_endpoints" "$NEBULA_PORT"
  create_nebula_service
  setup_firewall
  systemctl enable nebula
  systemctl start nebula
  sleep 2

  # Seed the node registry (overwritten, not appended: this is node #1)
  echo "${node_ip} ${hostname} full $(date)" > "${NEBULA_CONFIG}/cluster-registry.txt"

  # Create GlusterFS volume
  echo -e "${YELLOW}[+] Creating GlusterFS volume...${NC}"
  mkdir -p "${GLUSTER_BRICK_PATH}/brick1"
  gluster volume create "${GLUSTER_VOLUME}" "$(hostname):${GLUSTER_BRICK_PATH}/brick1" force 2>/dev/null || true
  gluster volume start "${GLUSTER_VOLUME}" 2>/dev/null || true

  # Mount volume
  mount -t glusterfs "localhost:/${GLUSTER_VOLUME}" "${GLUSTER_MOUNT_PATH}"
  grep -q "${GLUSTER_VOLUME}" /etc/fstab || echo "localhost:/${GLUSTER_VOLUME} ${GLUSTER_MOUNT_PATH} glusterfs defaults,_netdev 0 0" >> /etc/fstab

  # Package CA certificate for sharing
  ca_cert_b64=$(base64 -w0 "${NEBULA_CONFIG}/ca.crt")

  echo -e "\n${GREEN}════════════════════════════════════════${NC}"
  echo -e "${GREEN}Cluster created successfully!${NC}"
  echo -e "${GREEN}════════════════════════════════════════${NC}\n"
  echo -e "Share this cluster secret with joining nodes:\n"
  echo -e "${GREEN}${lighthouse_endpoints}:${NEBULA_PORT}:${ca_cert_b64}${NC}\n"
  echo -e "${YELLOW}Status:${NC}"
  echo " - Nebula IP: ${node_ip}"
  echo " - Lighthouse endpoints: ${lighthouse_endpoints}:${NEBULA_PORT}"
  echo " - This node is lighthouse: ${am_lighthouse}"
  echo " - GlusterFS volume: ${GLUSTER_VOLUME}"
  echo " - Mount point: ${GLUSTER_MOUNT_PATH}"
}

#######################################
# Join an existing cluster using the secret printed by create_cluster.
# Full nodes become GlusterFS servers (and lighthouses); remote nodes are
# GlusterFS clients only. Certificate signing is manual: the operator must
# sign this node's cert on the lighthouse and copy the files over.
#######################################
join_cluster() {
  echo -e "${GREEN}[*] Joining existing cluster...${NC}\n"
  local hostname
  hostname=$(hostname)
  local my_ip
  my_ip=$(get_next_ip)

  # Get cluster details
  read -r -p "Enter cluster secret (lighthouse_endpoints:port:ca_cert_base64): " cluster_secret
  if [ -z "$cluster_secret" ]; then
    echo -e "${RED}Cluster secret required!${NC}"
    exit 1
  fi

  # Parse secret (colon-separated; base64 payload may itself contain ':'? no —
  # standard base64 alphabet has none, so f3- is simply defensive)
  lighthouse_endpoints=$(echo "$cluster_secret" | cut -d: -f1)
  nebula_port=$(echo "$cluster_secret" | cut -d: -f2)
  ca_cert_b64=$(echo "$cluster_secret" | cut -d: -f3-)

  # Ask about node type
  echo -e "${YELLOW}Select node type:${NC}"
  echo " 1) Full node (contributes storage, lighthouse, read/write)"
  echo " 2) Remote node (client only, no storage contribution)"
  echo -e "${YELLOW}Note: Use remote nodes for locations with high latency to the cluster${NC}"
  read -r -p "Enter choice [1-2]: " node_type

  if [ "$node_type" = "2" ]; then
    is_remote="true"
    am_lighthouse="false"
    echo -e "${YELLOW}Configuring as remote node (GlusterFS client only)${NC}"
    # Install only GlusterFS client packages
    echo -e "${YELLOW}[+] Installing GlusterFS client packages...${NC}"
    dnf install -y glusterfs glusterfs-fuse || exit 1
  else
    is_remote="false"
    am_lighthouse="true"
    echo -e "${YELLOW}Configuring as full node (GlusterFS server)${NC}"
    # Install GlusterFS server packages
    echo -e "${YELLOW}[+] Installing GlusterFS server packages...${NC}"
    dnf install -y glusterfs-server || exit 1
    systemctl enable glusterd
    systemctl start glusterd
    # Create brick directory for full nodes
    mkdir -p "$GLUSTER_BRICK_PATH"
  fi

  echo -e "${YELLOW}[+] Configuring Nebula (IP: ${my_ip})...${NC}"

  # Decode and save CA certificate
  echo "$ca_cert_b64" | base64 -d > "${NEBULA_CONFIG}/ca.crt"

  echo -e "${RED}WARNING: Certificate signing not implemented in this simplified version.${NC}"
  echo -e "${YELLOW}On the lighthouse node, run this command to create a certificate for this node:${NC}"
  echo -e "${GREEN}cd ${NEBULA_CONFIG} && /usr/local/bin/nebula-cert sign -name \"${hostname}\" -ip \"${my_ip}/24\" -groups \"cluster\"${NC}"
  echo -e "${YELLOW}Then copy ${hostname}.crt and ${hostname}.key to ${NEBULA_CONFIG}/ on this node.${NC}"
  read -r -p "Press enter once you've created and copied the certificate files..."

  # Create Nebula config and start the daemon
  # NOTE(review): this stretch (config write through the lighthouse ping) was
  # partially lost in transit and is reconstructed from context.
  write_nebula_config "$hostname" "$am_lighthouse" "$lighthouse_endpoints" "$nebula_port"
  create_nebula_service
  setup_firewall
  systemctl enable nebula
  systemctl start nebula
  sleep 3

  # Verify overlay connectivity to the first node
  if ping -c 2 -W 3 192.168.100.1 > /dev/null 2>&1; then
    echo -e "${GREEN}[✓] Connected to node at 192.168.100.1${NC}"
  else
    echo -e "${YELLOW}[!] Could not reach 192.168.100.1 - this may be normal if it's the first node${NC}"
  fi

  # Register with cluster
  node_type_str=$([ "$is_remote" = "true" ] && echo "remote" || echo "full")
  echo "${my_ip} ${hostname} ${node_type_str} $(date)" >> "${NEBULA_CONFIG}/cluster-registry.txt"

  # Handle GlusterFS setup based on node type
  if [ "$is_remote" = "true" ]; then
    # Remote node - GlusterFS client only
    echo -e "${YELLOW}[+] Mounting GlusterFS as client...${NC}"
    # Find a full node to connect to (try first few IPs)
    mount_successful=false
    for ip in 192.168.100.1 192.168.100.2 192.168.100.3; do
      if ping -c 1 -W 2 "$ip" > /dev/null 2>&1; then
        echo -e "${YELLOW}Attempting to mount from $ip...${NC}"
        if mount -t glusterfs "${ip}:/${GLUSTER_VOLUME}" "${GLUSTER_MOUNT_PATH}" 2>/dev/null; then
          mount_successful=true
          mount_ip=$ip
          break
        fi
      fi
    done

    if [ "$mount_successful" = "true" ]; then
      # Add to fstab with fallback servers for resilience
      grep -q "${GLUSTER_VOLUME}" /etc/fstab || echo "${mount_ip}:/${GLUSTER_VOLUME} ${GLUSTER_MOUNT_PATH} glusterfs defaults,_netdev,backup-volfile-servers=192.168.100.1:192.168.100.2:192.168.100.3 0 0" >> /etc/fstab
      echo -e "${GREEN}Remote node configured - mounted cluster storage as client${NC}"
    else
      echo -e "${RED}Failed to mount GlusterFS volume!${NC}"
      echo "Make sure at least one full node is running."
    fi
  else
    # Full node - GlusterFS server
    echo -e "${YELLOW}[+] Joining GlusterFS cluster as server...${NC}"

    # Try to probe existing nodes
    echo -e "${YELLOW}[+] Looking for existing GlusterFS peers...${NC}"
    peer_found=false
    for ip in 192.168.100.1 192.168.100.2 192.168.100.3; do
      if [ "$ip" != "${my_ip}" ] && ping -c 1 -W 2 "$ip" > /dev/null 2>&1; then
        if gluster peer probe "$ip" 2>/dev/null; then
          echo "Connected to peer at $ip"
          peer_found=true
          break
        fi
      fi
    done

    if [ "$peer_found" = "false" ]; then
      echo -e "${YELLOW}No existing peers found - this might be normal for early nodes${NC}"
    fi

    # Wait for peer connection
    sleep 3

    # Create brick directory
    mkdir -p "${GLUSTER_BRICK_PATH}/brick1"

    if [ "$peer_found" = "true" ]; then
      # Add brick to existing volume, growing the replica count by one
      echo -e "${YELLOW}[+] Adding brick to GlusterFS volume...${NC}"
      # Get current replica count
      replica_count=$(gluster volume info "${GLUSTER_VOLUME}" 2>/dev/null | grep "Number of Bricks" | grep -oE "[0-9]+" | head -1)
      if [ -n "$replica_count" ]; then
        new_replica_count=$((replica_count + 1))
        gluster volume add-brick "${GLUSTER_VOLUME}" replica "${new_replica_count}" "$(hostname):${GLUSTER_BRICK_PATH}/brick1" force
      fi
    fi

    # Mount the volume locally (fall back to the first node's brick server)
    mount -t glusterfs "localhost:/${GLUSTER_VOLUME}" "${GLUSTER_MOUNT_PATH}" 2>/dev/null || mount -t glusterfs "192.168.100.1:/${GLUSTER_VOLUME}" "${GLUSTER_MOUNT_PATH}" 2>/dev/null

    # Add to fstab
    grep -q "${GLUSTER_VOLUME}" /etc/fstab || echo "localhost:/${GLUSTER_VOLUME} ${GLUSTER_MOUNT_PATH} glusterfs defaults,_netdev 0 0" >> /etc/fstab
    echo -e "${GREEN}Full node configured - contributing storage to cluster${NC}"
  fi

  echo -e "\n${GREEN}════════════════════════════════════════${NC}"
  echo -e "${GREEN}Successfully joined cluster!${NC}"
  echo -e "${GREEN}════════════════════════════════════════${NC}\n"
  echo -e "${YELLOW}Node details:${NC}"
  echo " - Nebula IP: ${my_ip}"
  echo " - Hostname: ${hostname}"
  echo " - Node type: $([ "$is_remote" = "true" ] && echo "Remote (no storage)" || echo "Full (contributes storage)")"
  echo " - GlusterFS mounted at: ${GLUSTER_MOUNT_PATH}"
}

#######################################
# Print Nebula and GlusterFS health for this node: service state, overlay
# interface address, registry contents, peer/volume status, mount usage.
#######################################
show_status() {
  echo -e "\n${YELLOW}=== Cluster Status ===${NC}\n"

  if [ -f "${NEBULA_CONFIG}/config.yaml" ]; then
    echo -e "${GREEN}Nebula Status:${NC}"
    systemctl is-active nebula && echo "Service: Active" || echo "Service: Inactive"
    if ip addr show nebula1 >/dev/null 2>&1; then
      echo "Interface: nebula1 $(ip addr show nebula1 | grep 'inet ' | awk '{print $2}')"
    else
      echo "Interface: Not found"
    fi
    echo ""
    if [ -f "${NEBULA_CONFIG}/cluster-registry.txt" ]; then
      echo -e "${GREEN}Cluster Nodes:${NC}"
      cat "${NEBULA_CONFIG}/cluster-registry.txt"
      echo ""
    fi
  else
    echo -e "${RED}Nebula not configured${NC}\n"
  fi

  echo -e "${GREEN}GlusterFS Status:${NC}"
  gluster peer status 2>/dev/null || echo "Not connected to cluster"
  echo ""
  gluster volume status "${GLUSTER_VOLUME}" 2>/dev/null || echo "Volume ${GLUSTER_VOLUME} not found"
  echo ""
  echo -e "${GREEN}Mounted at:${NC} ${GLUSTER_MOUNT_PATH}"
  df -h "${GLUSTER_MOUNT_PATH}" 2>/dev/null || echo "Not mounted"
}

# Main menu
echo "What would you like to do?"
echo " 1) Create new cluster"
echo " 2) Join existing cluster"
echo " 3) Show cluster status"
echo " 4) Exit"
echo ""
read -r -p "Enter choice [1-4]: " choice

case $choice in
  1) create_cluster ;;
  2) join_cluster ;;
  3) show_status ;;
  4)
    echo "Exiting..."
    exit 0
    ;;
  *)
    echo -e "${RED}Invalid choice${NC}"
    exit 1
    ;;
esac