Wed Feb 2 17:22:41 PST 2005
- Previous message: [Slony1-commit] By darcyb: Repair variable substitution problem with bison -y (remove
- Next message: [Slony1-commit] By cbbrowne: Alter documentation to reflect the renaming of "$SETNAME"
- Messages sorted by: [ date ] [ thread ] [ subject ] [ author ]
Log Message:
-----------
Patches per Steve Simms to replace $SETNAME with $CLUSTER_NAME, which
is a much more consistent name to use to identify Slony-I
clusters.
Also, a change by Chris Browne that, for non-users of the Apache log
rotator, puts the current timestamp in the name of the log file. That
way there's a rotation of sorts even if it isn't managed by "ALR".
Modified Files:
--------------
slony1-engine/tools/altperl:
README (r1.8 -> r1.9)
create_set.pl (r1.10 -> r1.11)
regenerate-listens.pl (r1.2 -> r1.3)
restart_nodes.pl (r1.3 -> r1.4)
show_configuration.pl (r1.2 -> r1.3)
slon-tools.pm (r1.15 -> r1.16)
slon.env (r1.8 -> r1.9)
slon_kill.pl (r1.7 -> r1.8)
slon_start.pl (r1.8 -> r1.9)
slon_watchdog.pl (r1.5 -> r1.6)
slon_watchdog2.pl (r1.5 -> r1.6)
uninstall_nodes.pl (r1.4 -> r1.5)
-------------- next part --------------
Index: slon_watchdog2.pl
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/tools/altperl/slon_watchdog2.pl,v
retrieving revision 1.5
retrieving revision 1.6
diff -Ltools/altperl/slon_watchdog2.pl -Ltools/altperl/slon_watchdog2.pl -u -w -r1.5 -r1.6
--- tools/altperl/slon_watchdog2.pl
+++ tools/altperl/slon_watchdog2.pl
@@ -17,7 +17,7 @@
$nodenum = $1;
}
-log_to_watchdog_log("Invoking watchdog for $SETNAME node $nodenum");
+log_to_watchdog_log("Invoking watchdog for $CLUSTER_NAME node $nodenum");
while (1) {
my $res = query_slony_status($nodenum); # See where the node stands
my $eventsOK;
@@ -53,7 +53,7 @@
# If the node needs a swift kick in the "RESTART", then submit that to slonik
if ($kick eq "YES") {
- log_to_watchdog_log("submit slonik to restart $SETNAME node $nodenum");
+ log_to_watchdog_log("submit slonik to restart $CLUSTER_NAME node $nodenum");
open(SLONIK, "|$SLON_BIN_PATH/slonik");
print SLONIK genheader();
print SLONIK "restart node $node\n";
@@ -61,7 +61,7 @@
}
if ($restart eq "YES") {
if ($pid) {
- log_to_watchdog_log("terminate slon daemon for $SETNAME node $nodenum");
+ log_to_watchdog_log("terminate slon daemon for $CLUSTER_NAME node $nodenum");
# Kill slon until dead...
kill 2, $pid;
sleep 3;
Index: slon-tools.pm
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/tools/altperl/slon-tools.pm,v
retrieving revision 1.15
retrieving revision 1.16
diff -Ltools/altperl/slon-tools.pm -Ltools/altperl/slon-tools.pm -u -w -r1.15 -r1.16
--- tools/altperl/slon-tools.pm
+++ tools/altperl/slon-tools.pm
@@ -69,7 +69,7 @@
# This is the usual header to a slonik invocation that declares the
# cluster name and the set of nodes and how to connect to them.
sub genheader {
- my $header = "cluster name = $SETNAME;\n";
+ my $header = "cluster name = $CLUSTER_NAME;\n";
foreach my $node (@NODES) {
if ($DSN[$node]) {
my $dsn = $DSN[$node];
@@ -120,7 +120,7 @@
my $tpid;
my ($dbname, $dbport, $dbhost) = ($DBNAME[$nodenum], $PORT[$nodenum], $HOST[$nodenum]);
# print "Searching for PID for $dbname on port $dbport\n";
- my $command = ps_args() . "| egrep \"[s]lon .*$SETNAME\" | egrep \"host=$dbhost dbname=$dbname.*port=$dbport\" | sort -n | awk '{print \$2}'";
+ my $command = ps_args() . "| egrep \"[s]lon .*$CLUSTER_NAME\" | egrep \"host=$dbhost dbname=$dbname.*port=$dbport\" | sort -n | awk '{print \$2}'";
#print "Command:\n$command\n";
open(PSOUT, "$command|");
while ($tpid = <PSOUT>) {
@@ -137,9 +137,11 @@
my $cmd;
`mkdir -p $LOGDIR/slony1/node$nodenum`;
if ($APACHE_ROTATOR) {
- $cmd = "$SLON_BIN_PATH/slon -s 1000 -d2 $SETNAME '$dsn' 2>&1 | $APACHE_ROTATOR \"$LOGDIR/slony1/node$nodenum/" . $dbname . "_%Y-%m-%d_%H:%M:%S.log\" 10M&";
+ $cmd = "$SLON_BIN_PATH/slon -s 1000 -d2 $CLUSTER_NAME '$dsn' 2>&1 | $APACHE_ROTATOR \"$LOGDIR/slony1/node$nodenum/" . $dbname . "_%Y-%m-%d_%H:%M:%S.log\" 10M&";
} else {
- $cmd = "$SLON_BIN_PATH/slon -s 1000 -d2 $SETNAME '$dsn' 2>&1 > $LOGDIR/slony1/node$nodenum/$dbname.log &";
+ my $now=`date '+%Y-%m-%d_%H:%M:%S'`;
+ chomp $now;
+ $cmd = "$SLON_BIN_PATH/slon -s 1000 -d2 -g 80 $CLUSTER_NAME '$dsn' 2>&1 > $LOGDIR/slony1/node$nodenum/$dbname-$now.log &";
}
print "Invoke slon for node $nodenum - $cmd\n";
system $cmd;
@@ -154,10 +156,10 @@
# my $query = qq{
# select now() - ev_timestamp > '$killafter'::interval as event_old, now() - ev_timestamp as age,
# ev_timestamp, ev_seqno, ev_origin as origin
-# from _$SETNAME.sl_event events, _$SETNAME.sl_subscribe slony_master
+# from _$CLUSTER_NAME.sl_event events, _$CLUSTER_NAME.sl_subscribe slony_master
# where
# events.ev_origin = slony_master.sub_provider and
-# not exists (select * from _$SETNAME.sl_subscribe providers
+# not exists (select * from _$CLUSTER_NAME.sl_subscribe providers
# where providers.sub_receiver = slony_master.sub_provider and
# providers.sub_set = slony_master.sub_set and
# slony_master.sub_active = 't' and
@@ -172,23 +174,23 @@
select * from
(select now() - con_timestamp < '$killafter'::interval, now() - con_timestamp as age,
con_timestamp
-from _$SETNAME.sl_confirm c, _$SETNAME.sl_subscribe slony_master
+from _$CLUSTER_NAME.sl_confirm c, _$CLUSTER_NAME.sl_subscribe slony_master
where c.con_origin = slony_master.sub_provider and
- not exists (select * from _$SETNAME.sl_subscribe providers
+ not exists (select * from _$CLUSTER_NAME.sl_subscribe providers
where providers.sub_receiver = slony_master.sub_provider and
providers.sub_set = slony_master.sub_set and
slony_master.sub_active = 't' and
providers.sub_active = 't') and
- c.con_received = _$SETNAME.getLocalNodeId('_$SETNAME') and
+ c.con_received = _$CLUSTER_NAME.getLocalNodeId('_$CLUSTER_NAME') and
now() - con_timestamp < '$killafter'::interval
limit 1) as slave_confirmed_events
union all (select
now() - con_timestamp < '$killafter'::interval, now() - con_timestamp as age,
con_timestamp
-from _$SETNAME.sl_confirm c, _$SETNAME.sl_subscribe slony_master
- where c.con_origin = _$SETNAME.getLocalNodeId('_$SETNAME') and
- exists (select * from _$SETNAME.sl_subscribe providers
- where providers.sub_provider = _$SETNAME.getLocalNodeId('_$SETNAME') and
+from _$CLUSTER_NAME.sl_confirm c, _$CLUSTER_NAME.sl_subscribe slony_master
+ where c.con_origin = _$CLUSTER_NAME.getLocalNodeId('_$CLUSTER_NAME') and
+ exists (select * from _$CLUSTER_NAME.sl_subscribe providers
+ where providers.sub_provider = _$CLUSTER_NAME.getLocalNodeId('_$CLUSTER_NAME') and
slony_master.sub_active = 't') and
now() - con_timestamp < '$killafter'::interval
limit 1)
@@ -207,8 +209,8 @@
# to a sl_subscribe entry that is not yet active.
sub node_is_subscribing {
my $see_if_subscribing = qq {
-select * from "_$SETNAME".sl_event e, "_$SETNAME".sl_subscribe s
-where ev_origin = "_$SETNAME".getlocalnodeid('_$SETNAME') and -- Event on local node
+select * from "_$CLUSTER_NAME".sl_event e, "_$CLUSTER_NAME".sl_subscribe s
+where ev_origin = "_$CLUSTER_NAME".getlocalnodeid('_$CLUSTER_NAME') and -- Event on local node
ev_type = 'SUBSCRIBE_SET' and -- Event is SUBSCRIBE SET
--- Then, match criteria against sl_subscribe
sub_set = ev_data1 and sub_provider = ev_data2 and sub_receiver = ev_data3 and
Index: create_set.pl
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/tools/altperl/create_set.pl,v
retrieving revision 1.10
retrieving revision 1.11
diff -Ltools/altperl/create_set.pl -Ltools/altperl/create_set.pl -u -w -r1.10 -r1.11
--- tools/altperl/create_set.pl
+++ tools/altperl/create_set.pl
@@ -30,9 +30,9 @@
}
print OUTFILE "
try {
- create set (id = $set, origin = $MASTERNODE, comment = 'Set $set for $SETNAME');
+ create set (id = $set, origin = $MASTERNODE, comment = 'Set $set for $CLUSTER_NAME');
} on error {
- echo 'Could not create subscription set $set for $SETNAME!';
+ echo 'Could not create subscription set $set for $CLUSTER_NAME!';
exit -1;
}
";
Index: restart_nodes.pl
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/tools/altperl/restart_nodes.pl,v
retrieving revision 1.3
retrieving revision 1.4
diff -Ltools/altperl/restart_nodes.pl -Ltools/altperl/restart_nodes.pl -u -w -r1.3 -r1.4
--- tools/altperl/restart_nodes.pl
+++ tools/altperl/restart_nodes.pl
@@ -11,7 +11,7 @@
my $dsn = $DSN[$node];
open(SLONIK, ">$FILE");
print SLONIK qq{
- cluster name = $SETNAME ;
+ cluster name = $CLUSTER_NAME ;
node $node admin conninfo = '$dsn';
restart node $node;
};
Index: regenerate-listens.pl
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/tools/altperl/regenerate-listens.pl,v
retrieving revision 1.2
retrieving revision 1.3
diff -Ltools/altperl/regenerate-listens.pl -Ltools/altperl/regenerate-listens.pl -u -w -r1.2 -r1.3
Index: slon_watchdog.pl
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/tools/altperl/slon_watchdog.pl,v
retrieving revision 1.5
retrieving revision 1.6
diff -Ltools/altperl/slon_watchdog.pl -Ltools/altperl/slon_watchdog.pl -u -w -r1.5 -r1.6
--- tools/altperl/slon_watchdog.pl
+++ tools/altperl/slon_watchdog.pl
@@ -52,12 +52,12 @@
# Next, restart the slon process to service the node
start_slon($nodenum);
$pid = get_pid($node);
- print SLONLOG "WATCHDOG: Restarted slon for set $SETNAME, PID $pid\n";
+ print SLONLOG "WATCHDOG: Restarted slon for the $CLUSTER_NAME cluster, PID $pid\n";
} else {
open(LOG, ">>$LOGDIR/slon_watchdog.log");
print LOG "\n";
system "date >> $LOGDIR/slon_watchdog.log";
- print LOG "Found slon daemon running for set $SETNAME, PID $pid\n";
+ print LOG "Found slon daemon running for the $CLUSTER_NAME cluster, PID $pid\n";
print LOG "Looks Ok\n";
print LOG "Sleeping for $sleep seconds\n";
}
Index: uninstall_nodes.pl
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/tools/altperl/uninstall_nodes.pl,v
retrieving revision 1.4
retrieving revision 1.5
diff -Ltools/altperl/uninstall_nodes.pl -Ltools/altperl/uninstall_nodes.pl -u -w -r1.4 -r1.5
--- tools/altperl/uninstall_nodes.pl
+++ tools/altperl/uninstall_nodes.pl
@@ -16,12 +16,12 @@
run_slonik_script($FILE);
foreach my $node (@NODES) {
- foreach my $command ("drop schema _$SETNAME cascade;") {
+ foreach my $command ("drop schema _$CLUSTER_NAME cascade;") {
print $command, "\n";
print `echo "$command" | psql -h $HOST[$node] -U $USER[$node] -d $DBNAME[$node] -p $PORT[$node]`;
}
foreach my $t (@SERIALTABLES) {
- my $command = "alter table $t drop column \\\"_Slony-I_" . $SETNAME . "_rowID\\\";";
+ my $command = "alter table $t drop column \\\"_Slony-I_" . $CLUSTER_NAME . "_rowID\\\";";
print $command, "\n";
print `echo "$command" | psql -h $HOST[$node] -U $USER[$node] -d $DBNAME[$node] -p $PORT[$node]`;
}
Index: slon.env
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/tools/altperl/slon.env,v
retrieving revision 1.8
retrieving revision 1.9
diff -Ltools/altperl/slon.env -Ltools/altperl/slon.env -u -w -r1.8 -r1.9
--- tools/altperl/slon.env
+++ tools/altperl/slon.env
@@ -7,7 +7,7 @@
require $ENV{"SLONYNODES"};
} else {
# Define environment locally...
- $SETNAME=flex2test;
+ $CLUSTER_NAME=flex2test;
$LOGDIR='/opt/logs/slon';
$SLON_BIN_PATH='/opt/OXRS/dbs/pgsql74/bin';
#$APACHE_ROTATOR="/opt/OXRS/apache/rotatelogs"; # optional path to Apache rotatelog tool
Index: show_configuration.pl
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/tools/altperl/show_configuration.pl,v
retrieving revision 1.2
retrieving revision 1.3
diff -Ltools/altperl/show_configuration.pl -Ltools/altperl/show_configuration.pl -u -w -r1.2 -r1.3
--- tools/altperl/show_configuration.pl
+++ tools/altperl/show_configuration.pl
@@ -18,7 +18,7 @@
}
print qq{
-Slony-I Cluster: $SETNAME
+Slony-I Cluster: $CLUSTER_NAME
Logs stored under $LOGDIR
Slony Binaries in: $SLON_BIN_PATH
};
Index: slon_start.pl
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/tools/altperl/slon_start.pl,v
retrieving revision 1.8
retrieving revision 1.9
diff -Ltools/altperl/slon_start.pl -Ltools/altperl/slon_start.pl -u -w -r1.8 -r1.9
--- tools/altperl/slon_start.pl
+++ tools/altperl/slon_start.pl
@@ -52,7 +52,7 @@
$pid = get_pid($node);
if ($pid) {
- die "Slon is already running for the '$SETNAME' set\n";
+ die "Slon is already running for the '$CLUSTER_NAME' cluster.\n";
}
my $dsn = $DSN[$nodenum];
@@ -61,9 +61,9 @@
$pid = get_pid($node);
unless ($pid) {
- print "Slon failed to start for cluster $SETNAME, node $node\n";
+ print "Slon failed to start for cluster $CLUSTER_NAME, node $node\n";
} else {
- print "Slon successfully started for cluster $SETNAME, node $node\n";
+ print "Slon successfully started for cluster $CLUSTER_NAME, node $node\n";
print "PID [$pid]\n";
if ($START_WATCHDOG) {
print "Start the watchdog process as well...\n";
Index: slon_kill.pl
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/tools/altperl/slon_kill.pl,v
retrieving revision 1.7
retrieving revision 1.8
diff -Ltools/altperl/slon_kill.pl -Ltools/altperl/slon_kill.pl -u -w -r1.7 -r1.8
--- tools/altperl/slon_kill.pl
+++ tools/altperl/slon_kill.pl
@@ -29,7 +29,7 @@
require 'slon-tools.pm';
require $SLON_ENV_FILE;
-print "slon_kill.pl... Killing all slon and slon_watchdog instances for setname $SETNAME\n";
+print "slon_kill.pl... Killing all slon and slon_watchdog instances for the cluster $CLUSTER_NAME\n";
print "1. Kill slon watchdogs\n";
# kill the watchdog
@@ -43,7 +43,7 @@
# kill the slon daemon
$found="n";
-open(PSOUT, ps_args() . " | egrep \"[s]lon .*$SETNAME\" | sort -n | awk '{print \$2}'|");
+open(PSOUT, ps_args() . " | egrep \"[s]lon .*$CLUSTER_NAME\" | sort -n | awk '{print \$2}'|");
shut_off_processes();
close(PSOUT);
if ($found eq 'n') {
@@ -55,11 +55,11 @@
while ($pid = <PSOUT>) {
chomp $pid;
if (!($pid)) {
- print "No slon_watchdog is running for set $SETNAME!\n";
+ print "No slon_watchdog is running for the cluster $CLUSTER_NAME!\n";
} else {
$found="y";
kill 9, $pid;
- print "slon_watchdog for set $SETNAME killed - PID [$pid]\n";
+ print "slon_watchdog for cluster $CLUSTER_NAME killed - PID [$pid]\n";
}
}
}
Index: README
===================================================================
RCS file: /usr/local/cvsroot/slony1/slony1-engine/tools/altperl/README,v
retrieving revision 1.8
retrieving revision 1.9
diff -Ltools/altperl/README -Ltools/altperl/README -u -w -r1.8 -r1.9
--- tools/altperl/README
+++ tools/altperl/README
@@ -41,9 +41,9 @@
This configuration should be set up in the file represented in the
environment variable SLONYNODES.
- $SETNAME represents the name of the cluster. In each database
+ $CLUSTER_NAME represents the name of the cluster. In each database
involved in the replication set, you will find the namespace
- "_$SETNAME" that contains Slony-I's configuration tables
+ "_$CLUSTER_NAME" that contains Slony-I's configuration tables
$MASTERNODE is the number of the "master" node. It defaults to 1, if
not otherwise set.
- Previous message: [Slony1-commit] By darcyb: Repair variable substitution problem with bison -y (remove
- Next message: [Slony1-commit] By cbbrowne: Alter documentation to reflect the renaming of "$SETNAME"
- Messages sorted by: [ date ] [ thread ] [ subject ] [ author ]
More information about the Slony1-commit mailing list