Add remote node support to nullpoint cluster
- Full nodes: contribute storage, act as lighthouses
- Remote nodes: don't contribute storage, not lighthouses, but have full read/write access
- Expanded README with clearer feature descriptions
- Interactive node type selection during join
- Remote nodes useful for edge locations or low-storage devices
This commit is contained in:
parent
7bc8fb8f06
commit
b3db6f0f82
17
README.md
17
README.md
@@ -51,10 +51,23 @@ The installer will:
|
||||
<br>
|
||||
</div>
|
||||
|
||||
Distributed storage cluster with [Nebula](https://github.com/slackhq/nebula) mesh networking and [GlusterFS](https://www.gluster.org/).
|
||||
Encrypted network and storage pool using [Nebula](https://github.com/slackhq/nebula) mesh VPN and [GlusterFS](https://www.gluster.org/) distributed filesystem.
|
||||
|
||||
### Features
|
||||
|
||||
- **Encrypted mesh network** - All traffic encrypted via Nebula overlay (192.168.100.0/24)
|
||||
- **Distributed storage** - Data replicated across all storage nodes
|
||||
- **Simple joining** - Single preshared secret + lighthouse endpoint
|
||||
- **Flexible nodes** - Full nodes (replicate data) or remote nodes (no storage)
|
||||
|
||||
### Setup
|
||||
|
||||
```bash
|
||||
wget -qO- https://git.dominik-roth.eu/dodox/nullpoint/raw/branch/master/cluster-setup.sh | sudo bash
|
||||
```
|
||||
|
||||
Join via single preshared secret and at least one lighthouse server. Storage replicated across all nodes at `/data/storage/`.
|
||||
Choose your node type:
|
||||
- **Full node** - Contributes storage, becomes lighthouse, read/write access
|
||||
- **Remote node** - Full read/write access, no local storage contribution
|
||||
|
||||
Storage mounted at `/data/storage/` on all nodes.
|
||||
|
111
cluster-setup.sh
111
cluster-setup.sh
@@ -142,13 +142,20 @@ create_cluster() {
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Ask if this node should be a lighthouse
|
||||
read -p "Should this node be a lighthouse? [Y/n]: " is_lighthouse
|
||||
is_lighthouse=${is_lighthouse:-Y}
|
||||
if [[ "$is_lighthouse" =~ ^[Yy] ]]; then
|
||||
am_lighthouse="true"
|
||||
else
|
||||
# Ask about node type
|
||||
echo -e "${YELLOW}Select node type:${NC}"
|
||||
echo " 1) Full node (contributes storage, lighthouse, read/write)"
|
||||
echo " 2) Remote node (no storage contribution, not lighthouse)"
|
||||
read -p "Enter choice [1-2]: " node_type
|
||||
|
||||
if [ "$node_type" = "2" ]; then
|
||||
is_remote="true"
|
||||
am_lighthouse="false"
|
||||
echo -e "${YELLOW}Configuring as remote node (no storage contribution)${NC}"
|
||||
else
|
||||
is_remote="false"
|
||||
am_lighthouse="true"
|
||||
echo -e "${YELLOW}Configuring as full node${NC}"
|
||||
fi
|
||||
|
||||
# Generate Nebula CA
|
||||
@@ -269,13 +276,20 @@ join_cluster() {
|
||||
nebula_port=$(echo "$cluster_secret" | cut -d: -f2)
|
||||
ca_cert_b64=$(echo "$cluster_secret" | cut -d: -f3-)
|
||||
|
||||
# Ask if this node should be a lighthouse
|
||||
read -p "Should this node be a lighthouse? [Y/n]: " is_lighthouse
|
||||
is_lighthouse=${is_lighthouse:-Y}
|
||||
if [[ "$is_lighthouse" =~ ^[Yy] ]]; then
|
||||
am_lighthouse="true"
|
||||
else
|
||||
# Ask about node type
|
||||
echo -e "${YELLOW}Select node type:${NC}"
|
||||
echo " 1) Full node (contributes storage, lighthouse, read/write)"
|
||||
echo " 2) Remote node (no storage contribution, not lighthouse)"
|
||||
read -p "Enter choice [1-2]: " node_type
|
||||
|
||||
if [ "$node_type" = "2" ]; then
|
||||
is_remote="true"
|
||||
am_lighthouse="false"
|
||||
echo -e "${YELLOW}Configuring as remote node (no storage contribution)${NC}"
|
||||
else
|
||||
is_remote="false"
|
||||
am_lighthouse="true"
|
||||
echo -e "${YELLOW}Configuring as full node${NC}"
|
||||
fi
|
||||
|
||||
echo -e "${YELLOW}[+] Configuring Nebula (IP: ${my_ip})...${NC}"
|
||||
@@ -367,34 +381,50 @@ EOF
|
||||
# Register with cluster
|
||||
echo "${my_ip} ${hostname} $(date)" >> "${NEBULA_CONFIG}/cluster-registry.txt"
|
||||
|
||||
# Join GlusterFS cluster
|
||||
echo -e "${YELLOW}[+] Joining GlusterFS cluster...${NC}"
|
||||
|
||||
# Try to probe existing nodes
|
||||
echo -e "${YELLOW}[+] Looking for existing GlusterFS peers...${NC}"
|
||||
gluster peer probe 192.168.100.1 2>/dev/null || echo "Could not reach 192.168.100.1"
|
||||
|
||||
# Wait for peer to be connected
|
||||
sleep 3
|
||||
|
||||
# Create brick directory
|
||||
mkdir -p "${GLUSTER_BRICK_PATH}/brick1"
|
||||
|
||||
# Add brick to existing volume and increase replica count
|
||||
echo -e "${YELLOW}[+] Adding brick to GlusterFS volume...${NC}"
|
||||
|
||||
# Get current replica count
|
||||
replica_count=$(gluster volume info ${GLUSTER_VOLUME} 2>/dev/null | grep "Number of Bricks" | grep -oE "[0-9]+" | head -1)
|
||||
new_replica_count=$((replica_count + 1))
|
||||
|
||||
# Add brick with increased replica count
|
||||
gluster volume add-brick ${GLUSTER_VOLUME} replica ${new_replica_count} $(hostname):${GLUSTER_BRICK_PATH}/brick1 force
|
||||
|
||||
# Mount the volume
|
||||
mount -t glusterfs 192.168.100.1:/${GLUSTER_VOLUME} ${GLUSTER_MOUNT_PATH}
|
||||
|
||||
# Add to fstab
|
||||
grep -q "${GLUSTER_VOLUME}" /etc/fstab || echo "192.168.100.1:/${GLUSTER_VOLUME} ${GLUSTER_MOUNT_PATH} glusterfs defaults,_netdev 0 0" >> /etc/fstab
|
||||
# Handle GlusterFS setup based on node type
|
||||
if [ "$is_remote" = "true" ]; then
|
||||
# Remote node - just mount, don't contribute storage
|
||||
echo -e "${YELLOW}[+] Mounting GlusterFS...${NC}"
|
||||
|
||||
# Mount the volume with full read/write access
|
||||
mount -t glusterfs 192.168.100.1:/${GLUSTER_VOLUME} ${GLUSTER_MOUNT_PATH}
|
||||
|
||||
# Add to fstab
|
||||
grep -q "${GLUSTER_VOLUME}" /etc/fstab || echo "192.168.100.1:/${GLUSTER_VOLUME} ${GLUSTER_MOUNT_PATH} glusterfs defaults,_netdev 0 0" >> /etc/fstab
|
||||
|
||||
echo -e "${GREEN}Remote node configured - full access to cluster storage without contributing local storage${NC}"
|
||||
else
|
||||
# Full node - full participation
|
||||
echo -e "${YELLOW}[+] Joining GlusterFS cluster as full node...${NC}"
|
||||
|
||||
# Try to probe existing nodes
|
||||
echo -e "${YELLOW}[+] Looking for existing GlusterFS peers...${NC}"
|
||||
gluster peer probe 192.168.100.1 2>/dev/null || echo "Could not reach 192.168.100.1"
|
||||
|
||||
# Wait for peer to be connected
|
||||
sleep 3
|
||||
|
||||
# Create brick directory
|
||||
mkdir -p "${GLUSTER_BRICK_PATH}/brick1"
|
||||
|
||||
# Add brick to existing volume and increase replica count
|
||||
echo -e "${YELLOW}[+] Adding brick to GlusterFS volume...${NC}"
|
||||
|
||||
# Get current replica count
|
||||
replica_count=$(gluster volume info ${GLUSTER_VOLUME} 2>/dev/null | grep "Number of Bricks" | grep -oE "[0-9]+" | head -1)
|
||||
new_replica_count=$((replica_count + 1))
|
||||
|
||||
# Add brick with increased replica count
|
||||
gluster volume add-brick ${GLUSTER_VOLUME} replica ${new_replica_count} $(hostname):${GLUSTER_BRICK_PATH}/brick1 force
|
||||
|
||||
# Mount the volume
|
||||
mount -t glusterfs 192.168.100.1:/${GLUSTER_VOLUME} ${GLUSTER_MOUNT_PATH}
|
||||
|
||||
# Add to fstab
|
||||
grep -q "${GLUSTER_VOLUME}" /etc/fstab || echo "192.168.100.1:/${GLUSTER_VOLUME} ${GLUSTER_MOUNT_PATH} glusterfs defaults,_netdev 0 0" >> /etc/fstab
|
||||
|
||||
echo -e "${GREEN}Full node configured - contributing storage to cluster${NC}"
|
||||
fi
|
||||
|
||||
echo -e "\n${GREEN}════════════════════════════════════════${NC}"
|
||||
echo -e "${GREEN}Successfully joined cluster!${NC}"
|
||||
@@ -402,6 +432,7 @@ EOF
|
||||
echo -e "${YELLOW}Node details:${NC}"
|
||||
echo " - Nebula IP: ${my_ip}"
|
||||
echo " - Hostname: ${hostname}"
|
||||
echo " - Node type: $([ "$is_remote" = "true" ] && echo "Remote (no storage)" || echo "Full (contributes storage)")"
|
||||
echo " - GlusterFS mounted at: ${GLUSTER_MOUNT_PATH}"
|
||||
}
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user