From 7d348a43da4ff7076a81922a33a9a903ca86df67 Mon Sep 17 00:00:00 2001 From: Joseph Snow Date: Sat, 12 Nov 2011 14:35:25 -0700 Subject: [PATCH] migrating monitor and backup scripts from sleepless/fc => FinalsClub repo --- emails/systemEPLMonitorFailed.ejs | 12 +++ fc_monitor_epl.js | 166 ++++++++++++++++++++++++++++++ fcbackups/.gitignore | 2 + util/fc_monitor_epl_cron.sh | 22 ++++ util/mon-disk-space.sh | 46 +++++++++ 5 files changed, 248 insertions(+) create mode 100644 emails/systemEPLMonitorFailed.ejs create mode 100644 fc_monitor_epl.js create mode 100644 fcbackups/.gitignore create mode 100644 util/fc_monitor_epl_cron.sh create mode 100644 util/mon-disk-space.sh diff --git a/emails/systemEPLMonitorFailed.ejs b/emails/systemEPLMonitorFailed.ejs new file mode 100644 index 0000000..418a91c --- /dev/null +++ b/emails/systemEPLMonitorFailed.ejs @@ -0,0 +1,12 @@ +

FinalsClub EPL Monitor Warning

+ +

The Automated FinalsClub EPL Monitor script was not successful.

+

DATE: <%= date %>

+

URL: <%= url %>

+ +

Monitor Report Details

+
<%= msgs %>
+ +FinalsClub.org + + diff --git a/fc_monitor_epl.js b/fc_monitor_epl.js new file mode 100644 index 0000000..59df6e0 --- /dev/null +++ b/fc_monitor_epl.js @@ -0,0 +1,166 @@ +// monitor EPL and send an email if unreachable + + +// Prerequisites +var http = require('http'); +var Mailer = require('./mailer.js'); + +// globals +var mailto = ['info@finalsclub.org', 'snow@sleepless.com']; + +var msgs = []; +function dlog(msg) { + msgs.push(msg); + console.log(msg); +} + + +function main() { + + var numRetries = 2; + var fc1Opts = { + host: 'finalsclub.org', + port: 9001, + path: '/', + method: 'GET', + timeout: 15 * 1000 + }; + + var errs = []; + + var date = new Date().toString(); + var url = 'http://' + fc1Opts.host + ':' + fc1Opts.port + fc1Opts.path; + dlog('FinalsClub EPL health check monitor'); + dlog('date: ' + date); + dlog('url: ' + url); + dlog(''); + + checkAlive(fc1Opts, numRetries, function (success, errMsg) { + var url = fc1Opts.host + ':' + fc1Opts.port + fc1Opts.path; + if (success) { + dlog('host is alive'); + dlog(''); + } else { + dlog('FAILED'); + dlog('host is dead - final error: ' + errMsg); + dlog(''); + + sendEmailAlert(url, msgs, date); + } + }); +} + +function checkAlive(httpOptions, retries, cb) { + var errs = []; + checkAlive2(httpOptions, retries, errs, cb); +} + +function checkAlive2(httpOptions, retries, errs, cb) { + checkAliveWorker(httpOptions, function (success, errMsg) { + if (success || retries <= 0) { + cb(success, errMsg); + } else { + dlog('Error: ' + errMsg + '\n\nretrying...'); + checkAlive2(httpOptions, retries - 1, errs, cb); + } + }); +} + +function checkAliveWorker(httpOptions, cb) { + + var timeoutDelayMS = httpOptions.timeout || 30 * 1000; + + // declare req var before using it's reference in timeout handler + var req = null; + + // init request timeout handler + var timeoutId = setTimeout(function () { + clearTimeout(timeoutId); + + if (cb) { + cb(false, 'timeout'); + cb = null; + } + req.abort(); + }, timeoutDelayMS); + + 
// init request now + req = http.request(httpOptions, function (res) { + // console.log('STATUS: ' + res.statusCode); + // console.log('HEADERS: ' + JSON.stringify(res.headers)); + res.setEncoding('utf8'); + res.on('data', function (chunk) { + // console.log('BODY: ' + chunk); + if (timeoutId) { + clearTimeout(timeoutId); + timeoutId = null; + } + + if (cb) { cb(true, null); } + cb = null; + }); + + if (res.statusCode != 200) { + if (timeoutId) { + clearTimeout(timeoutId); + timeoutId = null; + } + + var msg = ['invalid response code', + 'status: ' + res.statusCode, + 'headers: ' + JSON.stringify(res.headers)]; + + if (cb) { cb(false, msg.join('\n')); } + cb = null; + } + }); + + req.on('error', function (e) { + console.log('problem with request: ' + e.message); + if (timeoutId) { + clearTimeout(timeoutId); + timeoutId = null; + } + + if (cb) { cb(false, e.message); } + cb = null; + }); + + // close the request + req.end(); +} + + +function sendEmailAlert(url, msgs, date) { + var awsAccessKey = process.env.AWS_ACCESS_KEY_ID; + var awsSecretKey = process.env.AWS_SECRET_ACCESS_KEY; + var mailer = new Mailer(awsAccessKey, awsSecretKey); + + for (var i in mailto) { + var email = mailto[i]; + dlog('sending email alert to: ' + email); + var details = msgs.join('\n'); + var message = { + 'to': email, + 'subject': 'FinalsClub.org EPL Monitor Warning', + 'template': 'systemEPLMonitorFailed', + 'locals': { + 'url': url, + 'msgs': details, + 'date': date + } + }; + + + mailer.send(message, function (err, result) { + if (err) { + dlog('Error sending email\nError Message: ' + err.Message); + } else { + dlog('Successfully sent email.'); + } + }); + } +}; + +main(); + diff --git a/fcbackups/.gitignore b/fcbackups/.gitignore new file mode 100644 index 0000000..a56b41c --- /dev/null +++ b/fcbackups/.gitignore @@ -0,0 +1,2 @@ +/.fcbackup.env + diff --git a/util/fc_monitor_epl_cron.sh b/util/fc_monitor_epl_cron.sh new file mode 100644 index 0000000..b7d6f0e --- /dev/null +++ 
b/util/fc_monitor_epl_cron.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +## cron example +## ## this script checks if EPL is up and running. if not, it sends an alert email +5,20,35,50 * * * * /home/ec2-user/fc/util/fc_monitor_epl_cron.sh > /home/ec2-user/fc/util/fc_monitor_epl_cron.log.txt + + +## save the current working dir +pushd . +cd ~/fc/fcbackups + +## this scripts expects these vars to be set +## export AWS_ACCESS_KEY_ID= +## export AWS_SECRET_ACCESS_KEY= +if test -e .fcbackup.env ; then + source .fcbackup.env +fi + +cd ~/fc +node fc_monitor_epl.js +popd + diff --git a/util/mon-disk-space.sh b/util/mon-disk-space.sh new file mode 100644 index 0000000..ff70d30 --- /dev/null +++ b/util/mon-disk-space.sh @@ -0,0 +1,46 @@ +#!/usr/bin/env bash +## Author: Joseph Snow (snow@sleepless.com) 11/10/2011 +## script to check local disk space and submit data to AWS cloudwatch + +## VARS +export FC_HOME=~/fc +export AWS_CLOUDWATCH_HOME=$FC_HOME/util/CloudWatch-1.0.12.1 +export PATH=$AWS_CLOUDWATCH_HOME/bin:$PATH +export JAVA_HOME=/usr/lib/jvm/jre + + +## cron example +## ## this script updates custom disk space stats to AWS cloudwatch. 
this script should be run every 5 minutes +## */5 * * * * /home/ec2-user/fc/util/mon-disk-space.sh + +## this scripts expects these vars to be set +## export AWS_ACCESS_KEY_ID= +## export AWS_SECRET_ACCESS_KEY= +if test -e "$FC_HOME/fcbackups/.fcbackup.env" ; then + source "$FC_HOME/fcbackups/.fcbackup.env" +fi + + +path='/' +if [ -n "$1" ]; then + path=$1 +fi + +# get ec2 instance id +instanceid=`wget -q -O - http://169.254.169.254/latest/meta-data/instance-id` + +freespace=`df --local --block-size=1M $path | grep $path | tr -s ' ' | cut -d ' ' -f 4` +usedpercent=`df --local $path | grep $path | tr -s ' ' | cut -d ' ' -f 5 | grep -o "[0-9]*"` + +echo "mon-disk-space AWS cloudwatch custom diskspace monitor" +echo "instanceid: $instanceid" +echo "freespace: $freespace" +echo "usedpercent: $usedpercent" + +# send the stats to AWS cloudwatch using the CloudWatch tools +mon-put-data --I $AWS_ACCESS_KEY_ID --S $AWS_SECRET_ACCESS_KEY --region "$EC2_REGION" --metric-name "FreeSpaceMBytes" --namespace "System/Linux" --dimensions "InstanceId=$instanceid,Path=$path" --value "$freespace" --unit "Megabytes" +mon-put-data --I $AWS_ACCESS_KEY_ID --S $AWS_SECRET_ACCESS_KEY --metric-name "UsedSpacePercent" --namespace "System/Linux" --dimensions "InstanceId=$instanceid,Path=$path" --value "$usedpercent" --unit "Percent" + +echo "done" + + -- 2.25.1