Commit 0b762bec authored by Quanah Gibson-Mount

Merge remote-tracking branch 'origin/master' into OPENLDAP_REL_ENG_2_5

parents 4d18c356 8557cc93
......@@ -96,6 +96,7 @@ servers/slapd/slapmodify
servers/slapd/slappasswd
servers/slapd/slapschema
servers/slapd/slaptest
servers/lloadd/lloadd
tests/progs/ldif-filter
tests/progs/slapd-addel
tests/progs/slapd-bind
......
......@@ -11,13 +11,13 @@ build-no-threads-no-slapd:
- make depend
- make
build-openssl-heimdal:
build-openssl-heimdal-lloadd:
stage: build
script:
- apt update
- DEBIAN_FRONTEND=noninteractive apt install -y build-essential pkg-config automake libsasl2-dev heimdal-multidev libssl-dev libltdl-dev groff-base unixodbc-dev libwiredtiger-dev libperl-dev heimdal-kdc libsasl2-modules-gssapi-heimdal sasl2-bin
- DEBIAN_FRONTEND=noninteractive apt install -y build-essential pkg-config automake libsasl2-dev heimdal-multidev libssl-dev libltdl-dev groff-base unixodbc-dev libwiredtiger-dev libperl-dev heimdal-kdc libsasl2-modules-gssapi-heimdal sasl2-bin libevent-dev
- autoreconf
- ./configure --enable-backends=mod --enable-overlays=mod --enable-modules --enable-dynamic --disable-ndb --disable-asyncmeta
- ./configure --enable-backends=mod --enable-overlays=mod --enable-modules --enable-dynamic --disable-ndb --disable-asyncmeta --enable-balancer=mod
- make depend
- make
- ulimit -n 4096 # back-monitor takes a while scanning a long connections array
......@@ -29,13 +29,13 @@ build-openssl-heimdal:
paths:
- tests/testrun/
build-gnutls-mit:
build-gnutls-mit-standalone-lloadd:
stage: build
script:
- apt update
- DEBIAN_FRONTEND=noninteractive apt install -y build-essential pkg-config automake libsasl2-dev libltdl-dev groff-base unixodbc-dev libwiredtiger-dev libperl-dev krb5-user krb5-kdc krb5-admin-server libsasl2-modules-gssapi-mit sasl2-bin libgnutls28-dev
- DEBIAN_FRONTEND=noninteractive apt install -y build-essential pkg-config automake libsasl2-dev libltdl-dev groff-base unixodbc-dev libwiredtiger-dev libperl-dev krb5-user krb5-kdc krb5-admin-server libsasl2-modules-gssapi-mit sasl2-bin libgnutls28-dev libevent-dev
- autoreconf
- ./configure --enable-backends=mod --enable-overlays=mod --disable-autoca --enable-modules --enable-dynamic --disable-ndb --disable-asyncmeta
- ./configure --enable-backends=mod --enable-overlays=mod --disable-autoca --enable-modules --enable-dynamic --disable-ndb --disable-asyncmeta --enable-balancer=yes
- make depend
- make
- ulimit -n 4096 # back-monitor takes a while scanning a long connections array
......
......@@ -44,6 +44,7 @@ shift
#MD="md5"
#OpenSSL
SHA="openssl sha1"
SHA3="openssl sha3-512"
MD="openssl md5"
if test -e $RELNAME ; then
......@@ -87,6 +88,7 @@ tar cf $RELNAME.tar $RELNAME
gzip -9 -c $RELNAME.tar > $RELNAME.tgz
${MD} $RELNAME.tgz > $RELNAME.md5
${SHA} $RELNAME.tgz > $RELNAME.sha1
${SHA3} $RELNAME.tgz > $RELNAME.sha3-512
rm -f $RELNAME.tar
ls -l $RELNAME.*
......
......@@ -164,6 +164,8 @@ LTHREAD_LIBS = @LTHREAD_LIBS@
SLAPD_NDB_LIBS = @SLAPD_NDB_LIBS@
WT_LIBS = @WT_LIBS@
LEVENT_LIBS = @LEVENT_LIBS@
LDAP_LIBLBER_LA = $(LDAP_LIBDIR)/liblber/liblber.la
LDAP_LIBLDAP_LA = $(LDAP_LIBDIR)/libldap/libldap.la
......@@ -175,6 +177,8 @@ LDAP_L = $(LDAP_LIBLUTIL_A) \
$(LDAP_LIBLDAP_LA) $(LDAP_LIBLBER_LA)
SLAPD_L = $(LDAP_LIBLUNICODE_A) $(LDAP_LIBREWRITE_A) \
$(LDAP_LIBLUTIL_A) $(LDAP_LIBLDAP_LA) $(LDAP_LIBLBER_LA)
LLOADD_L = $(LDAP_LIBLUTIL_A) $(LDAP_LIBLDAP_LA) \
$(LDAP_LIBLBER_LA)
WRAP_LIBS = @WRAP_LIBS@
# AutoConfig generated
......@@ -202,6 +206,7 @@ SLAPD_SQL_INCLUDES = @SLAPD_SQL_INCLUDES@
SLAPD_SQL_LIBS = @SLAPD_SQL_LIBS@
SLAPD_LIBS = @SLAPD_LIBS@ @SLAPD_PERL_LDFLAGS@ @SLAPD_SQL_LDFLAGS@ @SLAPD_SQL_LIBS@ @SLAPD_SLP_LIBS@ @SLAPD_GMP_LIBS@
LLOADD_LIBS = @BALANCER_LIBS@ $(LEVENT_LIBS)
# Our Defaults
CC = $(AC_CC)
......
......@@ -2347,7 +2347,6 @@ print_syncstate( LDAP *ld, LDAPControl *ctrl )
char buf[LDAP_LUTIL_UUIDSTR_BUFSIZE], *uuidstr = "(UUID malformed)";
BerElement *ber;
ber_tag_t tag;
ber_len_t len;
ber_int_t state;
int rc;
......@@ -2422,7 +2421,6 @@ print_syncdone( LDAP *ld, LDAPControl *ctrl )
{
BerElement *ber;
struct berval cookie = BER_BVNULL;
ber_tag_t tag;
ber_len_t len;
ber_int_t refreshDeletes = 0;
......
......@@ -479,7 +479,7 @@ handle_private_option( int i )
pagedResults = 1 + crit;
} else if ( strcasecmp( control, "ps" ) == 0 ) {
int num, tmp;
int num;
/* PersistentSearch control */
if ( psearch != 0 ) {
fprintf( stderr,
......@@ -785,7 +785,6 @@ handle_private_option( int i )
#ifdef LDAP_CONTROL_X_SHOW_DELETED
} else if ( strcasecmp( control, "showDeleted" ) == 0 ) {
int num, tmp;
if( showDeleted ) {
fprintf( stderr,
_("showDeleted control previously specified\n"));
......@@ -802,7 +801,6 @@ handle_private_option( int i )
#ifdef LDAP_CONTROL_X_SERVER_NOTIFICATION
} else if ( strcasecmp( control, "serverNotif" ) == 0 ) {
int num, tmp;
if( serverNotif ) {
fprintf( stderr,
_("serverNotif control previously specified\n"));
......
......@@ -407,6 +407,13 @@ OL_ARG_ENABLE(unique, [AS_HELP_STRING([--enable-unique], [Attribute Uniqueness o
OL_ARG_ENABLE(valsort, [AS_HELP_STRING([--enable-valsort], [Value Sorting overlay])],
no, [no yes mod], ol_enable_overlays)
dnl ----------------------------------------------------------------
dnl BALANCER OPTIONS
AC_ARG_ENABLE(balanceroptions,[
LLOADD (Load Balancer Daemon) Options:])
OL_ARG_ENABLE(balancer, [AS_HELP_STRING([--enable-balancer], [enable load balancer])],
no, [no yes mod])
dnl ----------------------------------------------------------------
AC_ARG_ENABLE(xxliboptions,[
Library Generation & Linking Options])
......@@ -436,13 +443,18 @@ if test $ol_enable_slapd = no ; then
eval "ol_enable_$i=no"
fi
done
if test $ol_enable_balancer = mod ; then
AC_MSG_WARN([slapd disabled, ignoring --enable-balancer=mod argument])
ol_enable_balancer=no
fi
else
dnl If slapd enabled and loadable module support disabled
dnl then require at least one built-in backend
if test $ol_enable_modules = no; then
for i in backends overlays $Backends $Overlays; do
for i in backends overlays balancer $Backends $Overlays; do
eval "ol_tmp=\$ol_enable_$i"
if test -n "$ol_tmp" && test "$ol_tmp" = mod ; then
AC_MSG_ERROR([--enable-$i=mod requires --enable-modules])
......@@ -478,6 +490,13 @@ if test $ol_enable_modules = yes ; then
ol_enable_dynamic=yes
fi
if test $ol_enable_balancer != no ; then
dnl Load Balancer was specifically enabled
if test $ol_with_threads = no ; then
AC_MSG_ERROR([Load balancer requires threads])
fi
fi
if test $ol_enable_spasswd = yes ; then
if test $ol_with_cyrus_sasl = no ; then
AC_MSG_ERROR([--enable-spasswd requires --with-cyrus-sasl])
......@@ -501,13 +520,17 @@ LDAP_LIBS=
SLAPD_NDB_LIBS=
SLAPD_NDB_INCS=
LTHREAD_LIBS=
LEVENT_LIBS=
LUTIL_LIBS=
CLIENT_LIBS=
SLAPD_LIBS=
BALANCER_LIBS=
BALANCER_INCLUDE=
BUILD_SLAPD=no
BUILD_BALANCER=no
BUILD_THREAD=no
......@@ -2125,6 +2148,24 @@ if test $ol_enable_slp != no ; then
fi
fi
dnl ----------------------------------------------------------------
dnl Libevent
if test $ol_enable_balancer != no ; then
AC_CHECK_LIB(event_extra, evdns_base_new,
[have_libevent=yes
LEVENT_LIBS="$LEVENT_LIBS -levent_core -levent_extra"],
[AC_CHECK_LIB(event, evdns_base_new,
[have_libevent=yes
LEVENT_LIBS="$LEVENT_LIBS -levent"],
[have_libevent=no])])
if test $have_libevent = yes ; then
AC_DEFINE(HAVE_LIBEVENT, 1, [define if you have -levent])
else
AC_MSG_ERROR([You need libevent 2.0 or later with DNS support to build the load balancer])
fi
fi
dnl ----------------------------------------------------------------
dnl Checks for typedefs, structures, and compiler characteristics.
......@@ -2911,6 +2952,18 @@ if test "$ol_enable_valsort" != no ; then
AC_DEFINE_UNQUOTED(SLAPD_OVER_VALSORT,$MFLAG,[define for Value Sorting overlay])
fi
if test "$ol_enable_balancer" != no \
-a "$ol_with_threads" != no \
-a "$have_libevent" = yes ; then
if test "$ol_enable_balancer" = mod; then
BALANCER_INCLUDE=Makefile.module
BUILD_BALANCER=mod
else
BALANCER_INCLUDE=Makefile.server
BUILD_BALANCER=yes
fi
fi
if test "$ol_enable_slapi" != no ; then
AC_DEFINE(ENABLE_SLAPI,1,[define to enable slapi library])
BUILD_SLAPI=yes
......@@ -2983,14 +3036,17 @@ dnl overlays
AC_SUBST(BUILD_TRANSLUCENT)
AC_SUBST(BUILD_UNIQUE)
AC_SUBST(BUILD_VALSORT)
AC_SUBST(BUILD_BALANCER)
AC_SUBST(LDAP_LIBS)
AC_SUBST(CLIENT_LIBS)
AC_SUBST(SLAPD_LIBS)
AC_SUBST(BALANCER_LIBS)
AC_SUBST(SLAPD_NDB_LIBS)
AC_SUBST(SLAPD_NDB_INCS)
AC_SUBST(LTHREAD_LIBS)
AC_SUBST(LUTIL_LIBS)
AC_SUBST(LEVENT_LIBS)
AC_SUBST(WRAP_LIBS)
AC_SUBST(SLAPD_MODULES_CPPFLAGS)
......@@ -3026,6 +3082,8 @@ AC_SUBST(SLAPD_SQL_INCLUDES)
AC_SUBST(WT_INCS)
AC_SUBST(WT_LIBS)
AC_SUBST(BALANCER_INCLUDE)
dnl ----------------------------------------------------------------
dnl final help output
AC_ARG_WITH(xxinstall,[
......@@ -3070,6 +3128,9 @@ AC_CONFIG_FILES([Makefile:build/top.mk:Makefile.in:build/dir.mk]
[servers/slapd/back-wt/Makefile:build/top.mk:servers/slapd/back-wt/Makefile.in:build/mod.mk]
[servers/slapd/slapi/Makefile:build/top.mk:servers/slapd/slapi/Makefile.in:build/lib.mk:build/lib-shared.mk]
[servers/slapd/overlays/Makefile:build/top.mk:servers/slapd/overlays/Makefile.in:build/lib.mk]
[servers/lloadd/Makefile:build/top.mk:servers/lloadd/Makefile.in]
[servers/lloadd/Makefile.server:servers/lloadd/Makefile_server.in:build/srv.mk]
[servers/lloadd/Makefile.module:servers/lloadd/Makefile_module.in:build/mod.mk]
[tests/Makefile:build/top.mk:tests/Makefile.in:build/dir.mk]
[tests/run]
[tests/progs/Makefile:build/top.mk:tests/progs/Makefile.in:build/rules.mk])
......
TODO:
- [ ] keep a global op in-flight counter? (might need locking)
- [-] scheduling (who does what, more than one select thread? How does the proxy
work get distributed between threads?)
- [ ] managing timeouts?
- [X] outline locking policy: a lock inversion seems to be looming in the
design: when working with an op, we might need a lock on both the client and
the upstream, but depending on where we started, we might lock one first,
then the other
- [ ] how to deal with the balancer running out of fds? Especially when we hit
the limit, then lose an upstream connection and accept() a client, we
wouldn't be able to initiate a new one. A bit of a DoS... But probably not
a concern for Ericsson
- [ ] non-Linux? No idea how anything other than poll works (moot if building a
libevent/libuv-based load balancer since they take care of that, except
edge-triggered I/O?)
- [-] rootDSE? Controls and exops might have different semantics and need
binding to the same upstream connection.
- [ ] Just piggybacking on OpenLDAP as a module? We would still need some
updates in the core and the module/subsystem would be a very invasive one. On
the other hand, it would allow us to expose live configuration and monitoring
over LDAP on the current slapd listeners without re-inventing the wheel.
Expecting to handle only LDAPv3
terms:
server - configured target
upstream - a single connection to a server
client - an incoming connection
To maintain fairness `G( requested => ( F( progressed | failed ) ) )` (every
request eventually either progresses or fails), use queues and enforce timeouts
Runtime organisation
------
- main thread with its own event base handling signals
- one thread (later possibly more) listening on the rendezvous sockets, handing
the new sockets to worker threads
- n worker threads dealing with client and server I/O (dispatching actual work
to the thread pool most likely)
- a thread pool to handle actual work
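
A minimal libevent sketch of this layout (illustrative only: the thread count,
names like worker_main, and the use of EVLOOP_NO_EXIT_ON_EMPTY, which needs
libevent 2.1, are assumptions, not taken from the actual lloadd source):

    /* Sketch: a main thread whose event base only handles signals, plus
     * n worker threads each running their own base.
     * Build with: cc sketch.c -levent -levent_pthreads -lpthread */
    #include <event2/event.h>
    #include <event2/thread.h>
    #include <pthread.h>
    #include <signal.h>

    #define NWORKERS 4

    static void
    on_sigint( evutil_socket_t sig, short what, void *arg )
    {
        event_base_loopexit( arg, NULL ); /* workers would be told too */
    }

    static void *
    worker_main( void *arg )
    {
        /* client/upstream I/O events would be registered on this base */
        event_base_loop( arg, EVLOOP_NO_EXIT_ON_EMPTY );
        return NULL;
    }

    int
    main( void )
    {
        struct event_base *main_base;
        struct event *sigev;
        pthread_t workers[NWORKERS];
        int i;

        evthread_use_pthreads(); /* bases are touched across threads */

        main_base = event_base_new();
        for ( i = 0; i < NWORKERS; i++ ) {
            pthread_create( &workers[i], NULL, worker_main,
                    event_base_new() );
        }

        sigev = evsignal_new( main_base, SIGINT, on_sigint, main_base );
        event_add( sigev, NULL );
        event_base_dispatch( main_base ); /* only signals handled here */
        return 0;
    }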
Operational behaviour
------
- client read -> upstream write:
- client read:
- if TLS_SETUP, keep processing, set state back when finished and note that
we're under TLS
- ber_get_next(), if we don't have a tag, finished (unless we have true
edge-triggered I/O, also put the fd back into the ones we're waiting for)
- peek at op tag:
- unbind:
- with a single lock, mark all pending ops in upstreams abandoned, clear
client link (would it be fast enough if we remove them from upstream
map instead?)
- locked per op:
- remove op from upstream map
- check upstream is not write-suspended, if it is ...
- try to write the abandon op to upstream, suspend upstream if not
fully sent
- remove op from client map (how if we're in avl_apply?, another pass?)
- would be nice if we could wipe the complete client map then, otherwise
we need to queue it to have it freed when all abandons get passed onto
the upstream (just dropping them might put extra strain on upstreams,
will probably have a queue on each client/upstream anyway, not just a
single Ber)
- bind:
- check mechanism is not EXTERNAL (or implement it)
- abandon existing ops (see unbind)
- set state to BINDING, put DN into authzid
- pick upstream, create PDU and send it
- abandon:
- find op, mark for abandon, send to appropriate upstream
- Exop:
- check not BINDING (unless it's a cancel?)
- check OID:
- STARTTLS:
- check we don't have TLS yet
- abandon all
- set state to TLS_SETUP
- send the hello
- VC(?):
- similar to bind except for the abandons/state change
- other:
- check not BINDING
- pick an upstream
- create a PDU, send (marking upstream suspended if not written in full)
- check if we should read again (keep a counter of the number of times to
read off a connection in a single pass so that we maintain fairness; see
the read-loop sketch after this list)
- if we have read enough requests and can still read, re-queue ourselves (if we
don't have true edge-triggered I/O, we can just register the fd again)
- upstream write (only when suspended):
- flush the current BER
- there shouldn't be anything else?
- upstream read -> client write:
- upstream read:
- ber_get_next(), if we don't have a tag, finished (unless we have true
edge-triggered I/O, also put the fd back into the ones we're waiting for)
- when we get it, peek at msgid, resolve client connection, lock, check:
- if unsolicited, handle as close (and mark connection closing)
- if op is abandoned or does not exist, drop PDU and op, update counters
- if client backlogged, suspend upstream, register callback to unsuspend
(on progress when writing to client or abandon from client (connection
death, abandon proper, ...))
- reconstruct final PDU, write BER to client, if did not write fully,
suspend client
- if a final response, decrement operation counts on upstream and client
- check if should read again (keep a counter of number of responses to read
off a connection in a single pass so that we don't starve any?)
- client write ready (only checked for when suspended):
- write the rest of pending BER if any
- on successful write, pick all pending ops that need failure response, push
to client (are there any controls that need to be present in response even
in the case of failure?, what to do with them?)
- on successfully flushing them, walk through suspended upstreams, picking
the pending PDU (unsuspending the upstream) and writing, if PDU flushed
successfully, pick next upstream
- if we successfully flushed all suspended upstreams, unsuspend client
(and disable the write callback)
- upstream close/error:
- look up pending ops, try to write to clients, mark clients suspended that
have ops that need responses (another queue associated with client to speed
up?)
- schedule a new connection open
- client close/error:
- same as unbind
- client inactive (no pending ops and nothing happened in x seconds)
- might just send notice of disconnection and close
- op timeout handling:
- mark for abandon
- send abandon
- send timeLimitExceeded/adminLimitExceeded to client
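
A sketch of the client read loop with the fairness budget mentioned above;
ber_get_next() is real liblber, while the Connection fields, OPS_PER_PASS and
the connection_* helpers are hypothetical glue:

    /* Standalone sketch: read at most OPS_PER_PASS PDUs per pass so one
     * busy connection cannot starve the rest. */
    #include <errno.h>
    #include <lber.h>

    #define OPS_PER_PASS 8

    typedef struct Connection {
        Sockbuf    *c_sb;   /* liblber socket buffer for this fd */
        BerElement *c_ber;  /* PDU currently being assembled */
    } Connection;

    /* hypothetical helpers */
    void connection_process_pdu( Connection *c, ber_tag_t tag );
    void connection_rearm_read( Connection *c ); /* wait for more data */
    void connection_requeue( Connection *c );    /* budget spent, data left */
    void connection_close( Connection *c );

    void
    connection_read_cb( Connection *c )
    {
        int processed;

        for ( processed = 0; processed < OPS_PER_PASS; processed++ ) {
            ber_len_t len;
            ber_tag_t tag = ber_get_next( c->c_sb, &len, c->c_ber );

            if ( tag == LBER_DEFAULT ) {
                if ( errno == EWOULDBLOCK || errno == EAGAIN ) {
                    connection_rearm_read( c ); /* incomplete PDU */
                } else {
                    connection_close( c );      /* hard error */
                }
                return;
            }
            connection_process_pdu( c, tag );   /* dispatch on op tag */
        }
        /* budget used up and the socket may still hold data: re-queue
         * ourselves instead of reading on (poor man's edge triggering) */
        connection_requeue( c );
    }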
Picking an upstream:
- while there is a level available:
- pick a random ordering of upstreams based on weights
- while there is an upstream in the level:
- check number of ops in-flight (this is where we lock the upstream map)
- find the least busy connection (and check if a new connection should be
opened)
- try to lock for socket write, if available (no BER queued) we have our
upstream
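
A sketch of that selection under stated assumptions (positive weights, a flat
array of servers per level, an in-flight counter per connection); the
Server/UpstreamConn types and the returned-locked convention are illustrative,
not the actual data model:

    /* Standalone sketch: roulette-wheel pick of a server by weight, then
     * the least busy connection on it that we can lock immediately. */
    #include <pthread.h>
    #include <stdlib.h>

    typedef struct UpstreamConn {
        pthread_mutex_t      uc_write_mutex; /* held while writing a BER */
        int                  uc_ops_inflight;
        struct UpstreamConn *uc_next;
    } UpstreamConn;

    typedef struct Server {
        int           s_weight;  /* assumed > 0 */
        UpstreamConn *s_conns;
    } Server;

    UpstreamConn *
    upstream_select( Server *servers, int nservers )
    {
        UpstreamConn *best = NULL, *c;
        Server *s = servers;
        int total = 0, ticket, i;

        for ( i = 0; i < nservers; i++ )
            total += servers[i].s_weight;

        ticket = rand() % total;
        while ( ( ticket -= s->s_weight ) >= 0 )
            s++;

        for ( c = s->s_conns; c; c = c->uc_next ) {
            if ( best && c->uc_ops_inflight >= best->uc_ops_inflight )
                continue;
            /* "no BER queued" approximated by an uncontended write mutex */
            if ( pthread_mutex_trylock( &c->uc_write_mutex ) == 0 ) {
                if ( best )
                    pthread_mutex_unlock( &best->uc_write_mutex );
                best = c; /* returned still locked, caller writes the PDU */
            }
        }
        return best;      /* NULL: fall back to the next level */
    }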
PDU processing:
- request (have an upstream selected):
- get new msgid from upstream
- create an Op structure (actually, given the need for a freelist lock, we can
make it a cache for freed operation structures, avoiding some malloc
traffic; to reset, we need slap_sl_mem_create( ,,, 1 ))
- check proxyauthz is not present? or just let upstream reject it if there are
two?
- add own controls at the end:
- construct proxyauthz from authzid
- construct session tracking from remote IP, own name, authzid
- send over
- insert Op into client and upstream maps
- response/intermediate/entry:
- look up Op in upstream's map
- write old msgid, rest of the response can go unchanged
- if a response, remove Op from all maps (client and upstream)
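
A sketch of the response direction, with the map lookups reduced to
hypothetical op_find/op_release helpers (the real code would hold the upstream
map lock around the lookup, per the locking policy below):

    /* Standalone sketch: translate an upstream response back to the
     * client's original msgid, dropping PDUs for abandoned/unknown ops. */
    #include <lber.h>

    typedef struct Op {
        ber_int_t          o_client_msgid;   /* msgid the client sent */
        ber_int_t          o_upstream_msgid; /* msgid we allocated    */
        int                o_abandoned;
        struct Connection *o_client;
    } Op;

    /* hypothetical map/IO helpers */
    Op  *op_find( struct UpstreamConn *uc, ber_int_t upstream_msgid );
    void op_release( Op *op ); /* drop from client and upstream maps */
    void client_write_response( struct Connection *client,
            ber_int_t msgid, ber_tag_t tag, BerElement *body );

    void
    upstream_response( struct UpstreamConn *uc, ber_int_t msgid,
            ber_tag_t tag, BerElement *body, int is_final )
    {
        Op *op = op_find( uc, msgid );

        if ( op == NULL || op->o_abandoned ) {
            ber_free( body, 1 ); /* drop PDU and op, update counters */
            return;
        }
        /* old msgid written back, rest of the response goes unchanged */
        client_write_response( op->o_client, op->o_client_msgid, tag, body );
        if ( is_final )
            op_release( op ); /* decrements op counts on both sides */
    }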
Managing upstreams:
- async connect up to min_connections (is there a point in having a connection
count range if we can't use it when needed since all of the below is async?)
- when connected, set up TLS (if requested)
- when done, send a bind
- go for the bind interaction
- when done, add it to the upstream's connection list
- (if a connection is suspended or connections are over 75 % of the op limit,
schedule setting up a new connection unless the connection limit has been hit)
Managing timeouts:
- two options:
- maintain a separate locked priority queue to give a perfect ordering to when
each operation is to time out, would need to maintain yet another place
where operations can be found.
- the locking protocol for disposing of the operation would need to be
adjusted and might become even more complicated, might do the alternative
initially and then attempt this if it helps performance
- just do a sweep over all clients (that mutex is less contended) every so
often. With many in-flight operations this might be a lot of wasted work.
- we still need to sweep over all clients to check if they should be killed
anyway
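
A sketch of the sweep alternative, assuming a hypothetical client list with a
per-connection mutex and op list (all names illustrative):

    /* Standalone sketch: periodic sweep marking expired ops abandoned;
     * the same walk can kill idle clients. */
    #include <pthread.h>
    #include <time.h>

    typedef struct Op {
        time_t     o_start;
        int        o_abandoned;
        struct Op *o_next;
    } Op;

    typedef struct Connection {
        pthread_mutex_t    c_mutex;
        Op                *c_ops;
        struct Connection *c_next;
    } Connection;

    /* hypothetical: queue abandon upstream, timeLimitExceeded to client */
    void op_schedule_abandon( Op *op );

    void
    expire_sweep( Connection *clients, time_t timeout )
    {
        time_t now = time( NULL );
        Connection *c;
        Op *op;

        for ( c = clients; c; c = c->c_next ) {
            pthread_mutex_lock( &c->c_mutex );
            for ( op = c->c_ops; op; op = op->o_next ) {
                if ( !op->o_abandoned && now - op->o_start > timeout ) {
                    op->o_abandoned = 1;
                    op_schedule_abandon( op );
                }
            }
            /* also the place to notice clients idle for x seconds */
            pthread_mutex_unlock( &c->c_mutex );
        }
    }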
Dispatcher thread (2^n of them, fd x is handled by thread no x % (2^n)):
- poll on all registered fds
- remove each fd that's ready from the registered list and schedule the work
- work threads can put their fd back in if they deem necessary (=not suspended)
- this works as a poor man's edge-triggered polling; with enough workers,
should we do proper edge-triggered I/O? What about non-Linux?
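
The fd-to-thread mapping itself is trivial; a sketch (DISPATCHER_SHIFT is
illustrative):

    /* fd x is handled by dispatcher no. x % 2^n, so all events for one
     * fd stay on one thread. */
    #define DISPATCHER_SHIFT 2  /* 2^2 = 4 dispatcher threads */

    static inline int
    dispatcher_for_fd( int fd )
    {
        return fd & ( ( 1 << DISPATCHER_SHIFT ) - 1 );
    }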
Listener thread:
- slapd has just one, which then reassigns the sockets to separate I/O
threads
Threading:
- if using slap_sl_malloc, how much perf do we gain? To allocate a context per
op, we should have a dedicated parent context so that when we free it, we can
use that exclusively. The parent context's parent would be the main thread's
context. This implies a lot of slap_sl_mem_setctx/slap_sl_mem_create( ,,, 0 )
and making sure an op does not allocate/free things from two threads at the
same time (might need an Op mutex after all? Not such a huge cost if we
routinely reuse Op structures)
Locking policy:
- read mutexes are unnecessary, we only have one thread receiving data from the
connection - the one started from the dispatcher
- two reference counters of operation structures (an op is accessible from
client and upstream map, each counter is consistent when thread has a lock on
corresponding map), when decreasing the counter to zero, start freeing
procedure
- place to mark disposal finished for each side, consistency enforced by holding
the freelist lock when reading/manipulating
- when op is created, we already have a write lock on upstream socket and map,
start writing, insert to upstream map with upstream refcount 1, unlock, lock
client, insert (client refcount 0), unlock, lock upstream, decrement refcount
(triggers a test if we need to drop it now), unlock upstream, done
- when upstream processes a PDU, locks its map, increments counter, (potentially
removes if it's a response), unlocks, locks client's map, write mutex (this
order?) and full client mutex (if a bind response)
- when client side wants to work with a PDU (abandon, (un)bind), locks its map,
increase refcount, unlocks, locks upstream map, write mutex, sends or queues
abandon, unlocks write mutex, initiates freeing procedure from upstream side
(or if having to remember we've already increased client-side refcount, mark
for deletion, lose upstream lock, lock client, decref, either triggering
deletion from client or mark for it)
- if we have operation lock, we can simplify a bit (no need for three-stage
locking above)
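
A sketch of the two-counter disposal under the stated invariants (each counter
only touched under its side's map mutex, the freelist lock arbitrating the
final free); all names hypothetical:

    /* Standalone sketch: an op dies only after both sides have marked
     * their disposal finished under the freelist lock. */
    #include <pthread.h>

    typedef struct Op {
        int o_client_refcnt;   /* consistent under client map mutex   */
        int o_upstream_refcnt; /* consistent under upstream map mutex */
        int o_client_done;     /* both guarded by op_freelist_mutex   */
        int o_upstream_done;
    } Op;

    static pthread_mutex_t op_freelist_mutex = PTHREAD_MUTEX_INITIALIZER;

    void op_free( Op *op ); /* hypothetical: back onto the freelist cache */

    /* Called with the corresponding map mutex held, the op already
     * unlinked from that map. */
    void
    op_release_side( Op *op, int client_side )
    {
        int *refcnt = client_side ?
                &op->o_client_refcnt : &op->o_upstream_refcnt;
        int free_now;

        if ( --*refcnt > 0 )
            return;

        pthread_mutex_lock( &op_freelist_mutex );
        if ( client_side )
            op->o_client_done = 1;
        else
            op->o_upstream_done = 1;
        free_now = op->o_client_done && op->o_upstream_done;
        pthread_mutex_unlock( &op_freelist_mutex );

        if ( free_now )
            op_free( op );
    }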
Shutdown:
- stop accept() thread(s) - potentially add a channel to hand these listening
sockets over for zero-downtime restart
- if very gentle, mark connections as closing, start timeout and:
- when a new non-abandon PDU comes in from client - return LDAP_UNAVAILABLE
- when receiving a PDU from upstream, send over to client, if no ops pending,
send unsolicited response and close (RFC4511 suggests unsolicited response
is the last PDU coming from the upstream and libldap agrees, so we can't
send it for a socket we want to shut down more gracefully)
- gentle (or very gentle timed out):
- set timeout
- mark all ops as abandoned
- send unbind to all upstreams
- send unsolicited to all clients
- imminent (or gentle timed out):
- async close all connections?
- exit()
RootDSE:
- default option is not to care and if a control/exop has special restrictions,
it is the admin's job to flag it as such in the load-balancer's config
- another is not to care about the search request but check each search entry
being passed back, check DN and if it's a rootDSE, filter the list of
controls/exops/sasl mechs (external!) that are supported
- last one is to check all search requests for the DN/scope and synthesise the
response locally - probably not (would need to configure the complete list of
controls, exops, sasl mechs, naming contexts in the balancer)
Potential red flags:
- we suspend upstreams, if we ever suspend clients we need to be sure we can't
create dependency cycles
- is this an issue when only suspending the read side of each? Because even if
we stop reading from everything, we should eventually flush data to those we
can still talk to, as upstreams are flushed, we can start sending new
requests from live clients (those that are suspended are due to their own
inability to accept data)
- we might need to suspend a client if there is a reason to choose a
particular upstream (multi-request operation - bind, VC, PR, TXN, ...)
- a SASL bind, but that means there are no outstanding ops to receive;
it holds that !suspended(client) || !suspended(upstream), so they
cannot participate in a cycle
- VC - multiple binds at the same time - !!! more analysis needed
- PR - should only be able to have one per connection (that's a problem
for later, maybe even needs a dedicated upstream connection)
- TXN - ??? probably same situation as PR
- or if we have a queue for pending Bers on the server, we need not suspend
clients; an upstream is only chosen if the queue is free or there is a reason
to send it to that particular upstream (multi-stage bind/VC, PR, ...), but
that still makes it possible for a client to exhaust all our memory by
sending requests (VC or other ones bound to a slow upstream or by not
reading the responses at all)
......@@ -143,6 +143,9 @@ to expand the group.
Values of the
.B dgAuthz
attribute must conform to the (experimental) \fIOpenLDAP authz\fP syntax.
When using dynamic memberOf in search filters, search access to the
.B entryDN
pseudo-attribute is required.
.SH EXAMPLE
This example collects all the email addresses of a database into a single
......@@ -221,7 +224,7 @@ attribute to all the members of a dynamic group:
This example extends the dynamic memberOf feature to add the
.B dgMemberOf
.B memberOf
attribute to all the members of both static and dynamic groups:
.LP
.nf
......@@ -232,7 +235,7 @@ attribute to all the members of both static and dynamic groups:
# ...
overlay dynlist
dynlist\-attrset groupOfURLs memberURL member+dgMemberOf@groupOfNames
dynlist\-attrset groupOfURLs memberURL member+memberOf@groupOfNames
.fi
.LP
This dynamic memberOf feature can fully replace the functionality of the
......
.TH LLOADD 8C "RELEASEDATE" "OpenLDAP LDVERSION"
.\" Copyright 2017-2020 The OpenLDAP Foundation All Rights Reserved.
.\" Copying restrictions apply. See COPYRIGHT/LICENSE.
.\" $OpenLDAP$
.SH NAME
lloadd \- LDAP Load Balancer Daemon
.SH SYNOPSIS
.B LIBEXECDIR/lloadd
[\c
.BR \-4 | \-6 ]
[\c
.BI \-d \ debug-level\fR]
[\c
.BI \-f \ lloadd-config-file\fR]
[\c
.BI \-h \ URLs\fR]
[\c
.BI \-n \ service-name\fR]
[\c
.BI \-s \ syslog-level\fR]
[\c
.BI \-l \ syslog-local-user\fR]
[\c
.BI \-o \ option\fR[ = value\fR]]
[\c
.BI \-r \ directory\fR]
[\c
.BI \-u \ user\fR]
[\c
.BI \-g \ group\fR]
.SH DESCRIPTION
.LP
.B Lloadd
is the stand-alone LDAP load balancer daemon. It listens for LDAP connections on
any number of ports (default \fB389\fP), forwarding the LDAP operations
it receives over these connections to be handled by the configured
backends.