first commit

Roy committed 2025-06-23 21:19:51 +02:00
commit a4f9ea11f3
69 changed files with 4857 additions and 0 deletions


@@ -0,0 +1,23 @@
#!/bin/bash
#Roy Cohen :roy@wondercohen.nl
#objective :Gluster check bricks for Nagios
#First line of code :15/01/2019
#last update :28/01/2019
#version :1.1
######START OF SCRIPT#######
IP_OF_DOWN_BRICK=$(cat /tmp/brick_status |grep -v "Self-heal" | grep -E 'Brick|N/A|N' |grep -v "Y"| grep -B1 "N/A" | grep Brick| uniq | cut -d ":" -f1| cut -d " " -f2)
NUMBER_OF_DOWN_BRICKS=$(cat /tmp/brick_status |grep -v "Self-heal" | grep -E 'Brick|N'|grep "N/A"| awk '{print $4}'|grep N |wc -l)
if [[ $(find /tmp -name "brick_status" -mmin +4 -print) ]]; then
echo "WARNING: there was an error during the gluster birck check. please check the output of /root/scripts/check_gluster.sh or the nrpe status"
exit 1
# check number of active bricks
elif [ "$NUMBER_OF_DOWN_BRICKS" -eq 0 ] ; then
echo "OK: All bricks on are up"
exit 0
else
echo "CRITICAL: There are one or more bricks down on $IP_OF_DOWN_BRICK"
exit 2
fi
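#NOTE (sketch): one possible way to expose this check through NRPE on the gluster node;
#the command name and script path below are assumptions, adjust them to the local layout:
#  command[check_gluster_bricks]=/root/scripts/check_gluster_bricks.sh
#The Nagios server would then call something like: check_nrpe -H <gluster-host> -c check_gluster_bricks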


@@ -0,0 +1,24 @@
#!/bin/bash
#Roy Cohen :roy@wondercohen.nl
#objective :generate Gluster output for Nagios checks
#First line of code :15/01/2019
#last update :28/01/2019
#version :1.1
#info:
#This script needs to be added to cron and run every 3 min
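#Example /etc/cron.d entry (a sketch; it assumes this script is installed as
#/root/scripts/check_gluster.sh, the path the warning messages in the checks point to):
#*/3 * * * * root /root/scripts/check_gluster.sh >/dev/null 2>&1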
#Some general vars
STORAGE_IP=$(ip -4 addr show dev storage | grep -oP '(?<=inet\s)\d+(\.\d+){3}')
######START OF SCRIPT#######
#check status of the various volumes
/usr/sbin/gluster volume status all > /tmp/brick_status
#check peer status
/usr/sbin/gluster peer status > /tmp/peer_status
# get volume heal status and redirect it to a tmp file
for volume in $(/usr/sbin/gluster volume info|grep $STORAGE_IP | cut -d "/" -f3)
do
echo "volume, $volume"
/usr/sbin/gluster v heal $volume info |grep entries|cut -d ":" -f2
done > /tmp/gluster_monitoring_heal


@@ -0,0 +1,25 @@
#!/bin/bash
#Roy Cohen :roy@wondercohen.nl
#objective :Gluster check heal for Nagios
#First line of code :15/01/2019
#last update :23/01/2019
#version :1.0
######START OF SCRIPT#######
OUT=$(cat /tmp/gluster_monitoring_heal 2>/dev/null |grep -a -v volume |sed "s/^[ \t]*//"| grep -a -v ^0 > /dev/null 2>&1 )
EXIT_CODE=$?
if [ -f /tmp/gluster_monitoring_heal ]; then
if [[ $(find /tmp -name "gluster_monitoring_heal" -mmin +4 -print) ]]; then
echo "WARNING: there was an error during the gluster heal check. please check the output of /root/scripts/check_gluster.sh or the nrpe status"
exit 1
elif [ "$EXIT_CODE" -eq 0 ]; then
echo -e "WARNING: volumes healing at this moment\n$(cat /tmp/gluster_monitoring_heal |sed "s/^[ \t]*//" | grep -a -v ^0)" && rm -f /tmp/gluster_monitoring_heal
exit 1
else
echo "OK: no volumes healing at this moment" && rm -f /tmp/gluster_monitoring_heal
exit 0
fi
else
echo "WARNING: there was an error during the gluster heal check please check /tmp/gluster_monitoring_heal"
exit 1
fi


@@ -0,0 +1,23 @@
#!/bin/bash
#Roy Cohen :roy@wondercohen.nl
#objective :Gluster check peer for Nagios
#First line of code :15/01/2019
#last update :31/01/2019
#version :1.0
######START OF SCRIPT#######
NUMBER_OF_DOWN_PEERS=$(cat /tmp/peer_status | grep -a Disconnected | wc -l)
IP_OF_DOWN_PEER=$(cat /tmp/peer_status| grep -a -E "Hostname|Disconnected"| grep -a -B1 Disconnected | grep -a Hostname | cut -d ":" -f2| sed "s/^[ \t]*//")
if [[ $(find /tmp -name "peer_status" -mmin +4 -print) ]]; then
echo "WARNING: there was an error during the gluster peer check. please check the output of /root/scripts/check_gluster.sh or the nrpe status"
exit 1
# check number of down peers
elif [ "$NUMBER_OF_DOWN_PEERS" -eq 0 ] ; then
echo "OK: All peers are up"
exit 0
else
echo "CRITICAL: There are one or more peers down on $IP_OF_DOWN_PEER"
exit 2
fi


@@ -0,0 +1,15 @@
#!/bin/bash
#Roy Cohen :roy@wondercohen.nl
#objective :Gluster brick and heal status check from within a pod for Nagios
#First line of code :15/01/2019
#last update :24/01/2019
#version :1.0
######START OF SCRIPT#######
gluster volume status all > /brick_status
for volume in $(gluster volume info all | grep "Volume Name:"| cut -d ":" -f2)
do
echo "volume, $volume"
gluster v heal $volume info |grep entries|cut -d ":" -f2
done > /gluster_monitoring_heal


@@ -0,0 +1,41 @@
#!/bin/bash
#Roy Cohen :roy@wondercohen.nl
#objective :Check Gluster daemon state for Nagios
#First line of code :15/01/2019
#last update :17/01/2019
#version :1.0
#Some general vars
#glusterd pids (empty when the management daemon is not running)
GLUSTERD_STAT=$(pidof glusterd)
#glusterfsd (brick daemon) pids (empty when no brick process is running)
GLUSTER_BRICK_STAT=$(pidof glusterfsd)
######START OF SCRIPT#######
check_if_glusterd_is_running() {
#check if glusterd is running; only exit here on failure so the brick check below still runs
if [ -z "$GLUSTERD_STAT" ]; then
echo "CRITICAL: glusterd management daemon not running"
exit 2
fi
}
check_if_glusterfsd_is_running() {
# check for glusterfsd (brick daemon)
if [ -z "$GLUSTER_BRICK_STAT" ]; then
echo "CRITICAL: glusterfsd brick daemon not running"
exit 2
else
echo "OK: glusterd management and glusterfsd brick daemons are running"
exit 0
fi
}
#### CALL THE FUNCTIONS ######
check_if_glusterd_is_running
check_if_glusterfsd_is_running


@@ -0,0 +1,12 @@
#!/bin/bash
#Roy Cohen :roy@wondercohen.nl
#objective :Copy check script to gluster pod for Nagios
#First line of code :15/01/2019
#last update :31/01/2019
#version :1.1
######START OF SCRIPT#######
POD=$(oc get pods -n glusterfs | grep Running | grep 'glusterfs-storage' | head -n 1 | cut -d' ' -f1)
cd /root/scripts/
oc cp check_gluster_pod.sh glusterfs/$POD:/
oc exec -n glusterfs $POD /check_gluster_pod.sh && oc cp glusterfs/$POD:gluster_monitoring_heal /tmp && oc cp glusterfs/$POD:brick_status /tmp && oc cp glusterfs/$POD:brick_peer /tmp


@@ -0,0 +1,20 @@
#!/bin/bash
#Roy Cohen :roy@wondercohen.nl
#objective :Gluster heal status volume check in a pod
#First line of code :15/01/2019
#last update :31/01/2019
#version :1.2
#info:
#This script needs to be added to cron and run every 3 min
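#Example /etc/cron.d entry (a sketch; the file name check_gluster_pod_heal.sh is an
#assumption, adjust it to wherever this script is actually installed):
#*/3 * * * * root /root/scripts/check_gluster_pod_heal.sh >/dev/null 2>&1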
#Some general vars
STORAGE_IP=$(ip -4 addr show eth0 | grep -oP '(?<=inet\s)\d+(\.\d+){3}')
######START OF SCRIPT#######
# get volume heal status and redirect it to a tmp file
for volume in $(/usr/sbin/gluster volume info|grep "Volume Name:" | cut -d ":" -f2)
do
echo "volume, $volume"
/usr/sbin/gluster v heal $volume info|grep "Status:"| grep "not connected"
done


@@ -0,0 +1,58 @@
#!/bin/bash
#Roy Cohen :roy@wondercohen.nl
#objective :Gluster heal check on the OSM server for Nagios
#First line of code :15/01/2019
#last update :24/01/2019
#version :1.0
######START OF SCRIPT#######
#gluster volume info all | grep "Volume Name:"| cut -d ":" -f2 > /tmp/volume_names
for volume in $(gluster volume info all | grep "Volume Name:"| cut -d ":" -f2)
do
echo "volume, $volume"
gluster v heal $volume info |grep entries|cut -d ":" -f2
done
#for volume in $(cat /tmp/volume_names)
#do
# OC="oc rsh -n"
# POD=$(oc get pods -n glusterfs | grep Running | grep 'glusterfs-storage' | head -n 1 | cut -d' ' -f1)
# VAR1="gluster v heal $volume info |grep entries|cut -d: -f 2"
#echo "volume, $volume"
# oc rsh -n glusterfs $POD | $VAR1
#done
oc rsh -n glusterfs $(oc get pods -n glusterfs | grep Running | grep 'glusterfs-storage' | head -n 1 | cut -d' ' -f1) gluster volume status all
oc rsh -n glusterfs $(oc get pods -n glusterfs | grep Running | grep 'glusterfs-storage' | head -n 1 | cut -d' ' -f1) ip -4 addr show
POD=$(oc get pods -n glusterfs | grep Running | grep 'glusterfs-storage' | head -n 1 | cut -d' ' -f1)
oc rsh -n glusterfs $POD gluster volume info all | grep "Volume Name:"| cut -d ":" -f2 > /tmp/volume_names
for volume in $(cat /tmp/volume_names)
do
OC="oc rsh -n"
POD=$(oc get pods -n glusterfs | grep Running | grep 'glusterfs-storage' | head -n 1 | cut -d' ' -f1)
VAR1="gluster v heal $volume info |grep entries|cut -d: -f 2"
echo "volume, $volume"
echo "$OC glusterfs $POD $VAR1"
done
#leftover scratch commands from testing (not part of the check):
#/tmp/gluster_monitoring_heal
#oc rsh -n glusterfs $(oc get pods -n glusterfs | grep Running | grep 'glusterfs-storage' | head -n 1 | cut -d' ' -f1) gluster v heal vol_fa562e2cd81f137ccb455633829be163 info|grep entries|cut -d ":" -f2
#oc rsh -n glusterfs glusterfs-storage-4jq8n gluster v heal vol_fa562e2cd81f137ccb455633829be163 info|grep entries|cut -d ":" -f2
#oc cp /tmp/database.sql mysql:/tmp/
#kubectl cp <some-namespace>/<some-pod>:/tmp/foo /tmp/bar
#oc cp glusterfs/glusterfs-storage-4jq8n:gluster_monitoring_heal /tmp