Commit 66b4fe2b authored by Kurt Zeilenga

Import ITS#4439 (slapd not responding) fix for BDB/HDB cache from HEAD

parent b3d59b97
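
This import, taken from HEAD, drops the deferred runqueue purge task (the bi_cache_task field and the task form of bdb_cache_lru_purge) and instead lets the thread that adds a cache entry check the size limit and evict from the LRU tail itself, directly in bdb_cache_lru_add(). A minimal sketch of that shape, using one mutex and invented names (lru_cache, lru_add, lru_purge) rather than the real back-bdb types, which split the head and tail locks:

/* Illustrative sketch only -- not the back-bdb implementation. */
#include <pthread.h>
#include <stdlib.h>

typedef struct lru_node {
	struct lru_node *next, *prev;
} lru_node;

typedef struct lru_cache {
	lru_node *head, *tail;
	int cursize, maxsize;
	pthread_mutex_t mutex;
} lru_cache;

/* Evict from the tail until the cache is back under its limit.
 * Caller must hold c->mutex.  The cache owns its nodes, so they
 * are freed here. */
static void lru_purge( lru_cache *c )
{
	while ( c->cursize > c->maxsize && c->tail != NULL ) {
		lru_node *victim = c->tail;
		c->tail = victim->prev;
		if ( c->tail )
			c->tail->next = NULL;
		else
			c->head = NULL;
		c->cursize--;
		free( victim );
	}
}

/* Link a new node at the head; if that pushes the cache over its
 * limit, purge right here in the adding thread -- no deferred task,
 * no wakeup of a separate purge thread. */
static void lru_add( lru_cache *c, lru_node *n )
{
	pthread_mutex_lock( &c->mutex );
	n->prev = NULL;
	n->next = c->head;
	if ( c->head )
		c->head->prev = n;
	c->head = n;
	if ( c->tail == NULL )
		c->tail = n;
	c->cursize++;
	if ( c->cursize > c->maxsize )
		lru_purge( c );
	pthread_mutex_unlock( &c->mutex );
}
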
@@ -12,6 +12,7 @@ OpenLDAP 2.3.21 Release
 	Fixed slapd connection index bound check (ITS#4449)
 	Fixed slapd connection cleanup (ITS#4465)
 	Fixed slapd slap_realloc misuse (ITS#4477)
+	Fixed slapd-bdb/hdb cache issue (ITS#4439)
 	Fixed slapd-ldbm crash on modify bug (ITS#4464)
 	Fixed slapd-ldap potential bind deadlock (ITS#4409)
 	Fixed slapd-ldap/meta conn expiration concurrency (ITS#4429)

@@ -180,7 +180,6 @@ struct bdb_info {
 	u_int32_t	bi_txn_cp_kbyte;
 	struct re_s		*bi_txn_cp_task;
 	struct re_s		*bi_index_task;
-	struct re_s		*bi_cache_task;
 
 	int			bi_lock_detect;
 	long		bi_shm_key;

@@ -186,8 +186,10 @@ bdb_cache_entryinfo_destroy( EntryInfo *e )
 	} \
 	(cache)->c_lruhead = (ei); \
 	(ei)->bei_lruprev = NULL; \
-	if ( (cache)->c_lrutail == NULL ) { \
-		(cache)->c_lrutail = (ei); \
+	if ( !ldap_pvt_thread_mutex_trylock( &(cache)->lru_tail_mutex )) { \
+		if ( (cache)->c_lrutail == NULL ) \
+			(cache)->c_lrutail = (ei); \
+		ldap_pvt_thread_mutex_unlock( &(cache)->lru_tail_mutex ); \
 	} \
 } while(0)
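
The macro hunk above also changes how the tail pointer is set: instead of updating c_lrutail unconditionally, the adding thread only touches it if it can take lru_tail_mutex without blocking, so an add never waits behind a purge that is walking the tail. A small sketch of that trylock idiom with POSIX threads (the function and parameter names here are illustrative, not from the source):

#include <pthread.h>

/* Opportunistic tail update: only set the shared tail pointer if the
 * tail lock is free; on EBUSY skip it and let the purger keep ownership. */
static void maybe_set_tail( pthread_mutex_t *tail_mutex, void **tailp, void *ei )
{
	if ( !pthread_mutex_trylock( tail_mutex )) {	/* 0 means acquired */
		if ( *tailp == NULL )
			*tailp = ei;
		pthread_mutex_unlock( tail_mutex );
	}
	/* otherwise: do nothing rather than block the add path */
}
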
@@ -529,15 +531,25 @@ int hdb_cache_load(
 }
 #endif
 
-static void *
-bdb_cache_lru_purge(void *ctx, void *arg)
+/* caller must have lru_head_mutex locked. mutex
+ * will be unlocked on return.
+ */
+static void
+bdb_cache_lru_add(
+	struct bdb_info *bdb,
+	EntryInfo *ei )
 {
-	struct re_s *rtask = arg;
-	struct bdb_info *bdb = rtask->arg;
 	DB_LOCK		lock, *lockp;
 	EntryInfo *elru, *elprev;
 	int count = 0;
 
+	LRU_ADD( &bdb->bi_cache, ei );
+	ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.lru_head_mutex );
+
+	/* See if we're above the cache size limit */
+	if ( bdb->bi_cache.c_cursize <= bdb->bi_cache.c_maxsize )
+		return;
+
 	if ( bdb->bi_cache.c_locker ) {
 		lockp = &lock;
 	} else {

@@ -610,63 +622,6 @@ bdb_cache_lru_purge(void *ctx, void *arg)
 	}
 	ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.lru_tail_mutex );
-
-	/* If we're running as a task, drop the task */
-	if ( ctx ) {
-		ldap_pvt_thread_mutex_lock( &slapd_rq.rq_mutex );
-		ldap_pvt_runqueue_stoptask( &slapd_rq, rtask );
-		/* Defer processing till we're needed again */
-		ldap_pvt_runqueue_resched( &slapd_rq, rtask, 1 );
-		ldap_pvt_thread_mutex_unlock( &slapd_rq.rq_mutex );
-	}
-
-	return NULL;
-}
-
-/* caller must have lru_head_mutex locked. mutex
- * will be unlocked on return.
- */
-static void
-bdb_cache_lru_add(
-	struct bdb_info *bdb,
-	EntryInfo *ei )
-{
-	LRU_ADD( &bdb->bi_cache, ei );
-	ldap_pvt_thread_mutex_unlock( &bdb->bi_cache.lru_head_mutex );
-
-	/* See if we're above the cache size limit */
-	if ( bdb->bi_cache.c_cursize > bdb->bi_cache.c_maxsize ) {
-		if ( slapMode & SLAP_TOOL_MODE ) {
-			struct re_s rtask;
-			rtask.arg = bdb;
-			bdb_cache_lru_purge( NULL, &rtask );
-		} else {
-			int wake = 0;
-			ldap_pvt_thread_mutex_lock( &slapd_rq.rq_mutex );
-			if ( bdb->bi_cache_task ) {
-				if ( !ldap_pvt_runqueue_isrunning( &slapd_rq,
-					bdb->bi_cache_task )) {
-					/* We want it to start right now */
-					bdb->bi_cache_task->interval.tv_sec = 0;
-					ldap_pvt_runqueue_resched( &slapd_rq, bdb->bi_cache_task,
-						0 );
-					/* But don't try to reschedule it while it's running */
-					bdb->bi_cache_task->interval.tv_sec = 3600;
-					wake = 1;
-				}
-			} else {
-				bdb->bi_cache_task = ldap_pvt_runqueue_insert( &slapd_rq, 3600,
-					bdb_cache_lru_purge, bdb, "bdb_cache_lru_purge",
-					bdb->bi_dbenv_home );
-				wake = 1;
-			}
-			ldap_pvt_thread_mutex_unlock( &slapd_rq.rq_mutex );
-			/* Don't bother waking if the purge task is already running */
-			if ( wake )
-				slap_wake_listener();
-		}
-	}
-}
-
 EntryInfo *
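
The hunk above removes the old bdb_cache_lru_add() wrapper and all of the task scheduling; the surviving bdb_cache_lru_add() (shown in the previous hunk) keeps the original locking contract, namely that the caller acquires lru_head_mutex and the function releases it before doing the potentially slow purge. A hedged sketch of that hand-off convention (names are invented for illustration):

#include <pthread.h>

static pthread_mutex_t head_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Contract: head_mutex is held on entry and released here, so the
 * slow part (the size check and purge) never runs under the head lock. */
static void add_and_maybe_purge( void )
{
	/* ... link the new entry at the LRU head while head_mutex is held ... */
	pthread_mutex_unlock( &head_mutex );
	/* ... compare cursize against maxsize and purge without the lock ... */
}

static void caller( void )
{
	pthread_mutex_lock( &head_mutex );
	add_and_maybe_purge();	/* returns with head_mutex already released */
}
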
@@ -765,12 +720,12 @@ again:	ldap_pvt_thread_rdwr_rlock( &bdb->bi_cache.c_rwlock );
 		}
 #else
 		rc = hdb_cache_find_parent(op, tid, locker, id, eip );
-		if ( rc == 0 && *eip ) islocked = 1;
+		if ( rc == 0 ) islocked = 1;
 #endif
 	}
 
 	/* Ok, we found the info, do we have the entry? */
-	if ( *eip && rc == 0 ) {
+	if ( rc == 0 ) {
 		if ( (*eip)->bei_state & CACHE_ENTRY_DELETED ) {
 			rc = DB_NOTFOUND;
 		} else {

@@ -258,9 +258,8 @@ int bdb_entry_release(
 #endif
 		}
 		/* free entry and reader or writer lock */
-		if ( op ) {
-			boi = (struct bdb_op_info *)op->o_private;
-		}
+		boi = (struct bdb_op_info *)op->o_private;
+
 		/* lock is freed with txn */
 		if ( !boi || boi->boi_txn ) {
 			bdb_unlocked_cache_return_entry_rw( &bdb->bi_cache, e, rw );

@@ -473,8 +473,7 @@ bdb_idl_cache_del_id(
 		IDL_LRU_DELETE( bdb, cache_entry );
 		ldap_pvt_thread_mutex_unlock( &bdb->bi_idl_tree_lrulock );
 		free( cache_entry->kstr.bv_val );
-		if ( cache_entry->idl )
-			free( cache_entry->idl );
+		free( cache_entry->idl );
 		free( cache_entry );
 	}
 }
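
The last hunk simply drops a redundant NULL check: the C standard defines free(NULL) as a no-op (C99 7.20.3.2), so guarding free( cache_entry->idl ) with an if changes nothing. For illustration:

#include <stdlib.h>

int main( void )
{
	int *idl = NULL;
	free( idl );	/* well defined: free() on a null pointer does nothing */
	return 0;
}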