auto_clientcerts_dir: '/srv/puppet.debian.org/ca/RESULT/clientcerts'
apt::sources::debian::location: 'https://deb.debian.org/debian/'
-
-# all of these should be retired in favour of including the class role
-# with the host. weasel, 2019-09
-roles:
- postgresql_server:
- # these use pg-receive-file-from-backup which is defined in the
- # postgres::backup_source class. This should be
- # cleaned up and handled properly, including the ssh auth keys setup
- - lw07.debian.org
- - snapshotdb-manda-01.debian.org
-
classes:
- base::includes
---
classes:
- roles::postgresql::server
+
+postgres::backup_server::register_backup_clienthost::allow_read_hosts: ['fasolo']
---
classes:
- roles::postgresql::server
+ - roles::postgresql::cluster_bacula
classes:
- roles::snapshot_db
+ - roles::postgresql::server
+
+postgres::backup_server::register_backup_clienthost::allow_read_hosts: ['sallinen']
$localinfo = yamlinfo('*')
$nodeinfo = nodeinfo($::fqdn)
$allnodeinfo = allnodeinfo('sshRSAHostKey ipHostNumber', 'purpose mXRecord physicalHost purpose')
- $roles = hiera('roles')
}
| EOF
}
}
- postgresql-manda-01: {
- ferm::rule { 'dsa-postgres-bacula':
- description => 'Allow postgress access to cluster: bacula',
- domain => '(ip ip6)',
- rule => @("EOF"/$)
- &SERVICE_RANGE(tcp, 5432, (
- ${ join(getfromhash($deprecated::allnodeinfo, 'dinis.debian.org', 'ipHostNumber'), " ") }
- ${ join(getfromhash($deprecated::allnodeinfo, 'storace.debian.org', 'ipHostNumber'), " ") }
- ))
- | EOF
- }
- }
sallinen: {
ferm::rule { 'dsa-postgres':
description => 'Allow postgress access',
domain => '(ip ip6)',
rule => @("EOF"/$)
&SERVICE_RANGE(tcp, 5473, (
- ${ join(getfromhash($deprecated::allnodeinfo, 'lw07.debian.org', 'ipHostNumber'), " ") }
${ join(getfromhash($deprecated::allnodeinfo, 'snapshotdb-manda-01.debian.org', 'ipHostNumber'), " ") }
))
| EOF
}
}
- lw07: {
- ferm::rule { 'dsa-postgres-snapshot':
- description => 'Allow postgress access',
- rule => '&SERVICE_RANGE(tcp, 5439, ( 185.17.185.176/28 ))'
- }
- ferm::rule { 'dsa-postgres-snapshot6':
- domain => 'ip6',
- description => 'Allow postgress access',
- rule => '&SERVICE_RANGE(tcp, 5439, ( 2001:1af8:4020:b030::/64 ))'
- }
- }
snapshotdb-manda-01: {
ferm::rule { 'dsa-postgres-snapshot':
domain => '(ip ip6)',
--- /dev/null
+#!/usr/bin/python
+
+# Copyright (c) 2010 Peter Palfrader
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+# an ssh command wrapper,
+#
+# stores a file supplied by the calling host. We use this for postgres
+# backups, storing both base backups and WAL files.
+#
+
+import sys
+import os
+import optparse
+import re
+import subprocess
+import syslog
+import tempfile
+import stat
+import hashlib
+
+
+basedir = '/srv/backups'
+accepted_fileclasses = ['pg']
+
+block_size = 4096
+
+syslog.openlog(sys.argv[0], syslog.LOG_PID, syslog.LOG_DAEMON)
+
+# Usage: debbackup-ssh-wrap [<options>] <calling host>
+# via ssh orig command: <host> store-file <class> <name> <size> <sha512>
+# <host> retrieve-file <class> <from_host> <name>
+
+def info(m):
+    # log an informational message to syslog
+    syslog.syslog(syslog.LOG_INFO, m)
+
+def croak(m):
+    # log a warning, report it to the remote caller on stderr, and exit(1)
+    syslog.syslog(syslog.LOG_WARNING, m)
+    print >> sys.stderr, m
+    sys.exit(1)
+
+def filename_sanity_check(fn):
+    # reject remote-supplied names containing anything outside [a-zA-Z0-9._-]
+    # (blocks path separators, so names cannot escape their directory)
+    if re.search("[^a-zA-Z0-9._-]", fn):
+        croak("Invalid characters encountered in '%s'."%(fn))
+
+def get_classdir(file_class):
+    # return <basedir>/<file_class>; the class directory must already exist
+    d = os.path.join(basedir, file_class)
+    if not os.path.exists(d):
+        croak("Classdir '%s' does not exist."%(d))
+    return d
+
+def get_targetdir(classdir, host, create=False):
+    # return <classdir>/<host>, creating it when create=True, else
+    # refusing (croak) if it does not exist yet
+    d = os.path.join(classdir, host)
+    if not os.path.exists(d):
+        if create:
+            info("Creating %s"%(d))
+            os.mkdir(d)
+        else:
+            croak("Targetdir '%s' does not exist."%(d))
+    return d
+
+def sha512_for_file(fn):
+    # return the hex sha512 digest of file fn, reading block_size chunks
+    # so arbitrarily large backups do not get slurped into memory
+    d = hashlib.sha512()
+    f = open(fn)
+    while True:
+        data = f.read(block_size)
+        if not data: break
+        d.update(data)
+    f.close()
+    return d.hexdigest()
+
+
+def store_file(host, remote_args):
+    # Receive a file pushed by <host>; the payload arrives on stdin.
+    # remote_args: <class> <name> <size> <sha512>
+    if len(remote_args) != 4:
+        croak("Exactly four arguments expected for store-file.")
+    (fileclass, filename, size, checksum) = remote_args
+
+    # check fileclass
+    if not fileclass in accepted_fileclasses:
+        croak("Invalid file class '%s'"%(fileclass))
+
+    # check filename
+    filename_sanity_check(filename)
+
+    # check and convert size
+    try:
+        size = int(size)
+    except ValueError:
+        croak("Invalid size argument '%s'"%(size))
+
+    # check checksum
+    if not re.match("^[a-f0-9]{128}$", checksum):
+        croak("Invalid checksum argument '%s'."%(checksum))
+
+    classdir = get_classdir(fileclass)
+    targetdir = get_targetdir(classdir, host, True)
+    target = os.path.join(targetdir, filename)
+
+    # idempotency: re-uploading an identical file succeeds quietly,
+    # but a different file under an existing name is refused
+    if os.path.exists(target):
+        checksum_on_disk = sha512_for_file(target)
+        size_on_disk = os.stat(target)[stat.ST_SIZE]
+        if size_on_disk == size and checksum_on_disk == checksum:
+            info("Target '%s' already exists, with same size and checksum (%d, %s)."%(target, size, checksum))
+            sys.exit(0)
+        else:
+            croak("Target '%s' already exists and has different size or checksum (%d vs %d; %s vs %s)."%(target, size_on_disk,size, checksum_on_disk, checksum))
+
+    # spool to a temp file in the class dir; NamedTemporaryFile removes
+    # the spool name automatically on close (after the hard link below)
+    tmp = tempfile.NamedTemporaryFile(dir=classdir, suffix=".%s.%s"%(host,filename))
+    info("Receiving remote %s from %s to %s (%s bytes)"%(filename, host, tmp.name, size))
+    running_size = 0
+    digest = hashlib.sha512()
+    while True:
+        buf = sys.stdin.read(block_size)
+        if not buf: break
+        digest.update(buf)
+        tmp.write(buf)
+
+        running_size += len(buf)
+        # abort early rather than spooling an over-long upload to disk
+        if running_size > size:
+            croak("Size mismatch")
+    tmp.flush()
+    file_size = os.stat(tmp.name)[stat.ST_SIZE]
+
+    if file_size != size:
+        croak("Size mismatch")
+    if file_size != running_size:
+        croak("Size mismatch. WTF.")
+    if checksum != digest.hexdigest():
+        croak("Checksum mismatch. WTF.")
+
+    # link (not rename) into place; closing tmp then unlinks the spool
+    # name, leaving only the final target
+    try:
+        os.link(tmp.name, target)
+    except Exception, e:
+        croak("Failed at linking to target: %s"%(e))
+
+    tmp.close()
+    info("Successfully stored %s"%(target))
+
+
+def retrieve_file(host, remote_args, allowed_reads):
+    # Send a stored file to <host> on stdout, but only from directories
+    # explicitly whitelisted via --read-allow (allowed_reads).
+    # remote_args: <class> <from_host> <name>
+    if len(remote_args) != 3:
+        croak("Exactly three arguments expected for retrieve-file.")
+        sys.exit(1)
+    (fileclass, from_host, filename) = remote_args
+
+    # check fileclass
+    if not fileclass in accepted_fileclasses:
+        croak("Invalid file class '%s'"%(fileclass))
+    # check filename
+    filename_sanity_check(filename)
+    # and host
+    filename_sanity_check(from_host)
+
+    classdir = get_classdir(fileclass)
+    sourcedir = get_targetdir(classdir, from_host)
+    source = os.path.join(sourcedir, filename)
+
+    # authorize on the normalized absolute directory, not the raw path
+    abssource = os.path.abspath(source)
+    dirname = os.path.dirname(abssource)
+
+    if not dirname in allowed_reads:
+        croak("Host '%s' is not allowed to read from %s"%(host, dirname))
+
+    # reply protocol: "Format: 1" header block, then blank line, then data
+    if not os.path.exists(abssource):
+        print "Format: 1"
+        print "Status: 404 not found"
+        info("Not sending %s to remote %s - file does not exist."%(abssource, host))
+        sys.exit(1)
+
+    file_size = os.stat(abssource)[stat.ST_SIZE]
+    sha512 = sha512_for_file(abssource)
+
+    info("Sending %s to remote %s (%s bytes)"%(abssource, host, file_size))
+
+    print "Format: 1"
+    print "Status: 200 OK"
+    print "Size: %d"%(file_size)
+    print "SHA-512: %s"%(sha512)
+    print
+    # stream the payload in block_size chunks
+    f = open(abssource)
+    while True:
+        data = f.read(block_size)
+        if not data: break
+        sys.stdout.write(data)
+    f.close()
+
+
+parser = optparse.OptionParser()
+parser.set_usage("%prog [<options>] <calling host> (local usage)\n" +
+ "via ssh orig command: <host> store-file <class> <name> <size> <sha512>\n" +
+ " <host> retrieve-file <class> <from_host> <name>")
+parser.add_option("-r", "--read-allow", dest="allowed_reads", metavar="DIR", action="append",
+ help="Allow host to read files in directory.")
+(options, args) = parser.parse_args()
+
+def ensure_args_not_empty(remote_args):
+ if len(remote_args) == 0:
+ croak("One more argument expected.")
+
+if len(args) != 1:
+ parser.print_help()
+ sys.exit(1)
+
+host = args.pop(0)
+
+if not 'SSH_ORIGINAL_COMMAND' in os.environ:
+ print >> sys.stderr, "Did not find SSH_ORIGINAL_COMMAND in environment."
+ sys.exit(1)
+
+remote_args = os.environ['SSH_ORIGINAL_COMMAND'].split()
+
+ensure_args_not_empty(remote_args)
+remote_supplied_hostname = remote_args.pop(0)
+if remote_supplied_hostname != host:
+ croak("Hostname passed from remote does not match locally supplied hostname.")
+
+ensure_args_not_empty(remote_args)
+action = remote_args.pop(0)
+info("Host %s called with action %s."%(host, action))
+if action == "store-file":
+ store_file(host, remote_args)
+elif action == "retrieve-file":
+ if options.allowed_reads is None:
+ croak("No directories from which read is allowed given on cmdline.")
+ retrieve_file(host, remote_args, options.allowed_reads)
+else:
+ croak("Invalid operation '%s'"%(action))
+
+# vim:set et:
+# vim:set ts=4:
+# vim:set shiftwidth=4:
# Backup this cluster
#
+# This define causes the cluster to be registered on the backupservers.
+#
+# Furthermore, if this cluster is managed with postgresql::server and
+# do_role and do_hba are set, we create the role and modify the pg_hba.conf file.
+#
+# Since postgresql::server only supports a single cluster per host, we are moving
+# towards our own postgres::cluster, and this define also exports a hba rule for
+# those (regardless of the do_hba setting). If the cluster is managed with
+# postgres::cluster and has its manage_hba option set, this will then cause the
+# backup hosts to be allowed to replicate.
+#
+# Regardless of how the cluster is managed, firewall rules are set up to allow
+# access from the backup hosts.
+#
# @param pg_version pg version of the cluster
# @param pg_cluster cluster name
# @param pg_port port of the postgres cluster
# @param db_backup_role replication role username
# @param db_backup_role_password password of the replication role
# @param do_role create the role (requires setup with postgresql::server)
-# @param do_hba update pg_hba (requires setup with postgresql::server)
+# @param do_hba update pg_hba (requires setup with postgresql::server)
define postgres::backup_cluster(
String $pg_version,
String $pg_cluster = 'main',
}
}
}
+
+ # Send connections to the port to the pg-backup chain
+ # there, the register_backup_clienthost class will have
+ # realized the exported allows from the backup servers.
+ #
+ # Any non-matching traffic will fall through and it can
+ # be allowed elsewhere
+ #
+ # this rule is only needed for clusters that we do not manage
+ # with postgres::cluster. Hopefully these will go away with time
ferm::rule::simple { "dsa-postgres-backup-${pg_port}":
- description => 'Allow postgress access from backup host',
+ description => 'Check for postgres access from backup host',
port => $pg_port,
- saddr => $backup_servers_addrs,
+ target => 'pg-backup',
}
+  # export an hba rule allowing the backup servers to connect for
+  # replication (collected by clusters managed with postgres::cluster)
+  postgres::cluster::hba_entry { "backup-replication::${pg_version}::${pg_cluster}":
+    pg_version => $pg_version,
+    pg_cluster => $pg_cluster,
+    pg_port    => $pg_port,
+    database   => 'replication',
+    # bugfix: bareword db_backup_role was a literal string, not the
+    # $db_backup_role parameter of this define
+    user       => $db_backup_role,
+    address    => $backup_servers_addrs,
+  }
postgres::backup_server::register_backup_cluster { "backup-role-${::fqdn}}-${pg_port}":
pg_port => $pg_port,
pg_role => $db_backup_role,
-#
+# postgres backup server
class postgres::backup_server {
include postgres::backup_server::globals
#
# do not let other hosts directly build our authorized_keys file,
# instead go via a script that somewhat validates intput
- file { '/etc/dsa/postgresql-backup':
- ensure => 'directory',
- }
- file { '/usr/local/bin/postgres-make-backup-sshauthkeys':
- content => template('postgres/backup_server/postgres-make-backup-sshauthkeys.erb'),
- mode => '0555',
- notify => Exec['postgres-make-backup-sshauthkeys'],
+ file { '/usr/local/bin/debbackup-ssh-wrap':
+ source => 'puppet:///modules/postgres/backup_server/debbackup-ssh-wrap',
+ mode => '0555'
}
file { '/usr/local/bin/postgres-make-one-base-backup':
source => 'puppet:///modules/postgres/backup_server/postgres-make-one-base-backup',
mode => '0555'
}
- file { '/etc/dsa/postgresql-backup/sshkeys-manual':
- content => template('postgres/backup_server/sshkeys-manual.erb'),
- notify => Exec['postgres-make-backup-sshauthkeys'],
- }
- concat { $postgres::backup_server::globals::sshkeys_sources:
- notify => Exec['postgres-make-backup-sshauthkeys'],
- }
- concat::fragment { 'postgresql-backup/source-sshkeys-header':
- target => $postgres::backup_server::globals::sshkeys_sources ,
- content => @(EOF),
- # <name> <ip addresses> <key>
- | EOF
- order => '00',
- }
- Concat::Fragment <<| tag == $postgres::backup_server::globals::tag_source_sshkey |>>
- exec { 'postgres-make-backup-sshauthkeys':
- command => '/usr/local/bin/postgres-make-backup-sshauthkeys',
- refreshonly => true,
+ ssh::authorized_key_collect { 'postgres::backup_server':
+ target_user => $postgres::backup_server::globals::backup_unix_user,
+ collect_tag => $postgres::backup_server::globals::tag_source_sshkey,
}
####
# Maintain /etc/nagios/dsa-check-backuppg.conf
#
+ file { '/etc/dsa/postgresql-backup':
+ ensure => 'directory',
+ }
file { '/etc/dsa/postgresql-backup/dsa-check-backuppg.conf.d':
ensure => 'directory',
purge => true,
mode => '0400'
}
Concat::Fragment <<| tag == $postgres::backup_server::globals::tag_source_pgpassline |>>
+
+ ####
+ # Let us connect to the clusters we want
+ #
+ # We export this, and the backup clients collect it
+ #
+ # this rule is only needed for clusters that we do not manage
+ # with postgres::cluster. Hopefully these will go away with time
+  # exported for backup clients; collected into their pg-backup chain
+  @@ferm::rule::simple { "pg-backup_server::${::fqdn}":
+    tag         => 'postgres::backup_server::to-client',
+    description => 'Allow access from backup host',
+    chain       => 'pg-backup',
+    saddr       => $base::public_addresses,
+  }
}
+# register this host at the backup servers
#
+# This define sets up the ssh authorization on the backup servers
+# so this client can push WAL segments. Furthermore, the
+# client will be allowed to read other hosts backups -- specify
+# the list of allowed target hosts via params.
+#
+# @param allow_read_basedir directory under which files can be read
+# @param allow_read_hosts subdirectories under base to allow
define postgres::backup_server::register_backup_clienthost (
- $sshpubkey = $::postgres_key,
- $ipaddrlist = join(getfromhash($deprecated::nodeinfo, 'ldap', 'ipHostNumber'), ","),
- $hostname = $::hostname,
+ String $allow_read_basedir = '/srv/backups/pg',
+ Array[Stdlib::Fqdn] $allow_read_hosts = lookup( { 'name' => 'postgres::backup_server::register_backup_clienthost::allow_read_hosts', 'default_value' => [] } ),
) {
include postgres::backup_server::globals
- if $sshpubkey {
- $addr = assert_type(String[1], $ipaddrlist)
- @@concat::fragment { "postgresql::server::backup-source-clienthost::$name::$fqdn":
- target => $postgres::backup_server::globals::sshkeys_sources ,
- content => @("EOF"),
- ${hostname} ${addr} ${sshpubkey}
- | EOF
- tag => $postgres::backup_server::globals::tag_source_sshkey,
- }
+ $allowstr = $allow_read_hosts.map |$host| { "--read-allow=${allow_read_basedir}/${host}" }.join(' ')
+ $ssh_command = "/usr/local/bin/debbackup-ssh-wrap ${allowstr} ${::hostname}"
+
+ ssh::authorized_key_add { 'register_backup_clienthost':
+ target_user => $postgres::backup_server::globals::backup_unix_user,
+ key => dig($facts, 'ssh_keys_users', 'postgres', 'id_rsa.pub', 'line'),
+ command => $ssh_command,
+ from => $base::public_addresses,
+ collect_tag => $postgres::backup_server::globals::tag_source_sshkey,
}
+
+ # this rule is only needed for clusters that we do not manage
+ # with postgres::cluster. Hopefully these will go away with time
+ Ferm::Rule::Simple <<| tag == 'postgres::backup_server::to-client' |>>
}
--- /dev/null
+# postgresql cluster configuration
+#
+# @param pg_version pg version of the cluster
+# @param pg_cluster cluster name
+# @param pg_port port of the postgres cluster
+# @param manage_hba manage pg_hba
+# @param confdir directory where the configuration resides
+define postgres::cluster(
+ String $pg_version,
+ String $pg_cluster = 'main',
+ Integer $pg_port = 5432,
+ Boolean $manage_hba = false,
+ String $confdir = "/etc/postgresql/${pg_version}/${pg_cluster}",
+) {
+ $reload = "postgresql ${pg_version}/${pg_cluster} reload"
+ exec { $reload:
+ command => "systemctl reload postgresql@${pg_version}-${pg_cluster}.service",
+ refreshonly => true,
+ }
+
+ ferm::rule::simple { "postgres::cluster::hba_entry::${pg_version}::${pg_cluster}":
+ description => "check access to pg${pg_version}/${pg_cluster}",
+ port => $pg_port,
+ target => "pg-${pg_port}",
+ }
+
+ # hba entries and firewall rules
+ Postgres::Cluster::Hba_entry <<| tag == "postgres::cluster::${pg_version}::${pg_cluster}::hba::${::fqdn}" |>>
+
+ if $manage_hba {
+ concat { "postgres::cluster::${pg_version}::${pg_cluster}::hba":
+ path => "${confdir}/pg_hba.conf",
+ mode => '0440',
+ group => 'postgres',
+ ensure_newline => true,
+ notify => Exec[$reload],
+ }
+ concat::fragment{ "postgres::cluster::pg_hba-head::${pg_version}::${pg_cluster}":
+ target => "postgres::cluster::${pg_version}::${pg_cluster}::hba",
+ order => '00',
+ content => template('postgres/cluster/pg_hba.conf-head.erb'),
+ }
+ Concat::Fragment <| tag == "postgres::cluster::${pg_version}::${pg_cluster}::hba" |>
+ }
+}
--- /dev/null
+# An entry in pg_hba and the corresponding firewall rule if necessary
+#
+# This currently only supports a limited number of entry types. Only
+# what we need at the moment.
+#
+# See the upstream documentation at https://www.postgresql.org/docs/11/auth-pg-hba-conf.html
+# for details.
+#
+# @param pg_port port of the postgres cluster
+# @param pg_cluster cluster name
+# @param pg_version pg version of the cluster
+# @param connection_type connection type
+# @param database database (or all, sameuser, replication, etc.)
+# @param user user (or all, etc.)
+# @param address hosts that match
+# @param method auth method
+# @param order ordering of this entry in pg_hba.conf
+define postgres::cluster::hba_entry (
+  Integer $pg_port,
+  String $pg_cluster,
+  String $pg_version,
+  Enum['local', 'hostssl'] $connection_type = 'hostssl',
+  Variant[String,Array[String]] $database = 'sameuser',
+  Variant[String,Array[String]] $user = 'all',
+  Optional[Variant[Stdlib::IP::Address, Array[Stdlib::IP::Address]]] $address = undef,
+  Enum['md5', 'trust'] $method = 'md5',
+  String $order = '50',
+) {
+  # auth methods that require a client address in the hba entry
+  $address_methods = ['md5']
+  if $method in $address_methods {
+    if !$address {
+      fail("Authentication method ${method} needs an address")
+    }
+  } else {
+    # bugfix: this branch used to re-test !($method in $address_methods),
+    # which is always true here, so any non-address method (e.g. 'trust')
+    # failed unconditionally. Only fail when an address WAS supplied.
+    if $address {
+      fail("Authentication method ${method} needs no address")
+    }
+  }
+
+  # matching firewall allow for the cluster's per-port chain
+  if ($address) {
+    ferm::rule::simple { "postgres::cluster::hba_entry::${name}":
+      description => "allow access to pg${pg_version}/${pg_cluster}: ${name}",
+      saddr       => $address,
+      chain       => "pg-${pg_port}",
+    }
+  }
+
+  # normalize scalars to sorted comma-joined lists / CIDR addresses
+  $real_database = Array($database, true).sort().join(',')
+  $real_user = Array($user, true).sort().join(',')
+  $real_address = $address ? {
+    undef   => [''],
+    default => Array($address, true).map |$a| {
+      if $a =~ Stdlib::IP::Address::V4::CIDR { $a }
+      elsif $a =~ Stdlib::IP::Address::V4::Nosubnet { "${a}/32" }
+      elsif $a =~ Stdlib::IP::Address::V6::CIDR { $a }
+      elsif $a =~ Stdlib::IP::Address::V6::Nosubnet { "${a}/128" }
+      else { fail("Do not know address type for ${a}") }
+    }
+  }
+
+  # virtual fragment; realized by postgres::cluster when manage_hba is set
+  @concat::fragment { "postgres::cluster::pg_hba::${name}":
+    tag     => "postgres::cluster::${pg_version}::${pg_cluster}::hba",
+    target  => "postgres::cluster::${pg_version}::${pg_cluster}::hba",
+    order   => $order,
+    content => inline_template( @(EOF) ),
+    #
+    # rule <%= @name %>
+    <% @real_address.each do |addr| -%>
+    <%= [@connection_type, @real_database, @real_user, addr, @method].join(' ') %>
+    <% end -%>
+    #
+    | EOF
+  }
+}
+++ /dev/null
-#!/bin/bash
-
-# Copyright 2017 Peter Palfrader
-#
-# Permission is hereby granted, free of charge, to any person obtaining
-# a copy of this software and associated documentation files (the
-# "Software"), to deal in the Software without restriction, including
-# without limitation the rights to use, copy, modify, merge, publish,
-# distribute, sublicense, and/or sell copies of the Software, and to
-# permit persons to whom the Software is furnished to do so, subject to
-# the following conditions:
-#
-# The above copyright notice and this permission notice shall be
-# included in all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-
-set -e
-set -u
-
-CONFFILE=/etc/dsa/postgresql-backup/sshkeys-sources
-OUTFILE=/etc/ssh/userkeys/debbackup
-HEAD=/etc/dsa/postgresql-backup/sshkeys-manual
-
-cat > "${OUTFILE}.new" << EOF
-# ###################
-# This file was created using postgres-make-backup-sshauthkeys.
-# Do not edit this manually but edit its sources and run the script (or let puppet run it for you).
-#####################
-EOF
-
-if [ -e "$HEAD" ] ; then
- echo "# $HEAD" >> "${OUTFILE}.new"
- cat "$HEAD" >> "${OUTFILE}.new"
- echo "# end of $HEAD" >> "${OUTFILE}.new"
- echo "" >> "${OUTFILE}.new"
-fi
-
-egrep -v '^(#|$)' "$CONFFILE" |
- while read host ipaddr key; do
-
- if [[ "$host" =~ [^a-z0-9A-Z_-] ]]; then
- echo >&2 "Invalid hostname $host"
- continue
- fi
- if [[ "$ipaddr" =~ [^0-9a-fA-F:.,] ]]; then
- echo >&2 "Invalid ipaddr $ipaddr"
- continue
- fi
-
- echo "command=\"/usr/local/bin/debbackup-ssh-wrap $host\",from=\"$ipaddr\",restrict $key" >> "${OUTFILE}.new"
-done
-
-mv "${OUTFILE}.new" ${OUTFILE}
-# vim:syn=sh:
set -u
if [ "$(id -u)" = 0 ]; then
- echo >&2 "Do not run me as root. Probably you want sudo -u <%= @backup_unix_user %>."
+ echo >&2 "Do not run me as root. Probably you want sudo -u <%= scope['postgres::backup_server::globals::backup_unix_user'] %>."
exit 1
fi
log "Cannot acquire lock on $flagfile, skipping $host:$port $version/$cluster."
fi
fi
-done < '<%= scope['postgres::backup_server::globals::base_backup_clusters'] %> '
+done < '<%= scope['postgres::backup_server::globals::base_backup_clusters'] %>'
+++ /dev/null
-# maintained manually in puppet
-# postgresql backups:
-command="/usr/local/bin/debbackup-ssh-wrap lw07 --read-allow=/srv/backups/pg/sallinen",restrict,from="185.17.185.187,2001:1af8:4020:b030:deb::187" ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDiLZIqnyKrsfoT1sQdbuUsOoqW1t71Sv8hpJj9yLzrSFq/YCnho9G2Q/LJm4sMB4W64uQMUX6oLsqsgIBbOZw71CBRou41zwS/D+7+sjiPy1aVXp+L+fAXqLdemCUYqXAm0bGTLboGmlDSG3/r3v3B2+vqwAoHaC/GwuoNgvHq+sfxZPo/9cDRlTyE0ktyxwdUN+czxyLtDPqz3CucOHX03p8F3lNEwFUCGIVAkP4zxZsiEjD+eCbWam0bVFoWnfXYcmf2GYKEy2PQp0ksXmbsnRIblW5zoKdEXeDjwSStFHtjqkJw2TdPLUGSXljCgy9OCXYVMUrFnXw2Ak88KYpV postgres@lw07 (20140713)
-command="/usr/local/bin/debbackup-ssh-wrap snapshotdb-manda-01 --read-allow=/srv/backups/pg/sallinen",restrict,from="82.195.75.73,2001:41b8:202:deb::311:73" ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC53Sx/qzFL+GNrT01fP9tXpd9CjaOZuhLVHIOpoDQM5Nrr4DgbWA3vTghHpdpRHt18EmzWEmclTk3qej/vN6vBIG4cMc8EfpvEvXOLW2qQzMMrx5UeergUX76ie41B8yOCd9lf6H3G+rLqfBR6xEws39WgwTBRT86mKpolYDCJHX1Q8i85eJ/mw9FjHUENZYSxO4k5KBas2/G03+e+/J4TvgjyGbqCxc1RvmiMLE+cnfmeaprZuUbKkL0Df/mV2osuKStfG9ise/qtL0Kv318bsnYvXPDMdFWtFsR1lX2MpHfCFYWJd4bHtNOGSlixYbHcFlNFlSDessfLgpoKwWi3 postgres@snapshotdb-manda-01 (2019-05-23)
-
-
-## XXX this is only here because of the --read-allow
-command="/usr/local/bin/debbackup-ssh-wrap bmdb1 --read-allow=/srv/backups/pg/fasolo",restrict,from="5.153.231.10,2001:41c8:1000:21::21:10" ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDFGdCqZ9/q5T5IgQ2RBUJ/4iIRFPkC+djquRlQEBjCLDZsnNrZC89K4u5IPMe0hCJCy+vp0mjKgzndLS3eyTuc0S8X8ukz8DawPY2smev72bKpf+2YEq/Eeyd42xoF0BbFSatM7GNWXJk+TyPXs2Pn8EGxVnVtDC5Z7VAxK+5qCr17duQG2NQbTawKiF2e+S2ohSsLZi4WUKx/lj/cUl3fmp0m7ZCwmEMImr/jUnm0eGw1k/1QKvqorajfjKpxs5dFPqfuvr9XaKs9mL2HtMH0OEbarDl+3kT4803X5xLT9b0kbWO9c9sAQRUmN9tPtZGiU5ShcBO7I0iKuQwDpxSr postgres@bmdb1 (20130706)
# edit with visudo!
-nagios ALL=(<%= @backup_unix_user %>) NOPASSWD: /usr/lib/nagios/plugins/dsa-check-backuppg ""
+nagios ALL=(<%= scope['postgres::backup_server::globals::backup_unix_user'] %>) NOPASSWD: /usr/lib/nagios/plugins/dsa-check-backuppg ""
--- /dev/null
+# PostgreSQL Client Authentication Configuration File
+# ===================================================
+# maintained via puppet
+
+# default entries allows local access:
+######################################
+# Database administrative login by Unix domain socket
+local all postgres peer
+
+# "local" is for Unix domain socket connections only
+local all all peer
+# IPv4 local connections:
+host all all 127.0.0.1/32 md5
+# IPv6 local connections:
+host all all ::1/128 md5
+# Allow replication connections from localhost, by a user with the
+# replication privilege.
+local replication all peer
+host replication all 127.0.0.1/32 md5
+host replication all ::1/128 md5
+######################################
+
+++ /dev/null
-module Puppet::Parser::Functions
- newfunction(:has_role, :type => :rvalue) do |args|
- role = args[0]
- roles = lookupvar('deprecated::roles')
- fqdn = lookupvar('fqdn')
- if not roles.include?(role)
- err "Failed to look up missing role #{role}"
- return false
- end
- case roles[role]
- when Hash then roles[role].include?(fqdn)
- else roles[role].map{ |k|
- case k
- when Hash then k.keys.first
- else k
- end
- }.include?(fqdn)
- end
- end
-end
# = Class: roles
#
class roles {
- if has_role('postgresql_server') {
- include postgres::backup_source
- }
-
if $::keyring_debian_org_mirror {
include roles::keyring_debian_org_mirror
}
--- /dev/null
+#
+# postgresql bacula cluster
+#
+class roles::postgresql::cluster_bacula {
+ $pg_port = 5432
+ $pg_cluster = 'bacula'
+ $pg_version = '11'
+
+ postgres::cluster { 'bacula':
+ pg_version => $pg_version,
+ pg_cluster => $pg_cluster,
+ pg_port => $pg_port,
+ manage_hba => true,
+ }
+ postgres::cluster::hba_entry { 'bacula-dir':
+ pg_version => $pg_version,
+ pg_cluster => $pg_cluster,
+ pg_port => $pg_port,
+ database => 'bacula',
+ user => ['bacula', 'bacula-dinis-reader', 'nagios'],
+ address => ['82.195.75.77', '2001:41b8:202:deb::311:77'],
+ }
+ postgres::cluster::hba_entry { 'bacula-sd':
+ pg_version => $pg_version,
+ pg_cluster => $pg_cluster,
+ pg_port => $pg_port,
+ database => 'bacula',
+ user => 'bacula-storace-reader',
+ address => ['93.94.130.161', '2a02:158:380:280::161'],
+ }
+}
crl = []
-roles = scope.lookupvar('deprecated::roles')
['diabelli.debian.org'].each do |node|
c = getcrl(node)
next if c.nil?
<%
allnodeinfo = scope.lookupvar('deprecated::allnodeinfo')
- roles = scope.lookupvar('deprecated::roles')
%>
# local admin